xref: /linux/arch/s390/kvm/kvm-s390.c (revision 31928aa5863e71535ee942f506ca9ac8ce1c4315)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
54f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
55ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
58ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
597697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
60ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
61ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
66ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6769d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
68453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
69453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
70453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
71453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
72453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
738a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
74453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
75453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
76b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
77453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
78453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
79bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
805288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
81bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
827697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
895288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
905288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9242cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9342cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
95388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
96e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9741628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
98b0c632dbSHeiko Carstens 	{ NULL }
99b0c632dbSHeiko Carstens };
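/*
 * Each entry above pairs a debugfs file name with the offset of a per-VCPU
 * counter inside struct kvm_vcpu (see the VCPU_STAT() helper).  The generic
 * KVM code sums the counters over all VCPUs and exposes the result under the
 * KVM debugfs directory (typically /sys/kernel/debug/kvm/<name> when debugfs
 * is mounted).
 */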
100b0c632dbSHeiko Carstens 
10178c4b59fSMichael Mueller unsigned long *vfacilities;
1022c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
103b0c632dbSHeiko Carstens 
10478c4b59fSMichael Mueller /* test availability of vfacility */
105280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
10678c4b59fSMichael Mueller {
10778c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10878c4b59fSMichael Mueller }
10978c4b59fSMichael Mueller 
110b0c632dbSHeiko Carstens /* Section: not file related */
11113a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
112b0c632dbSHeiko Carstens {
113b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
11410474ae8SAlexander Graf 	return 0;
115b0c632dbSHeiko Carstens }
116b0c632dbSHeiko Carstens 
1172c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1182c70fe44SChristian Borntraeger 
119b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
120b0c632dbSHeiko Carstens {
1212c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1222c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
123b0c632dbSHeiko Carstens 	return 0;
124b0c632dbSHeiko Carstens }
125b0c632dbSHeiko Carstens 
126b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
127b0c632dbSHeiko Carstens {
1282c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
13384877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
13484877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
135b0c632dbSHeiko Carstens }
136b0c632dbSHeiko Carstens 
137b0c632dbSHeiko Carstens /* Section: device related */
138b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
139b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
140b0c632dbSHeiko Carstens {
141b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
142b0c632dbSHeiko Carstens 		return s390_enable_sie();
143b0c632dbSHeiko Carstens 	return -EINVAL;
144b0c632dbSHeiko Carstens }
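/*
 * The only device ioctl handled here is KVM_S390_ENABLE_SIE, issued on the
 * /dev/kvm file descriptor.  Minimal userspace sketch (hypothetical, not part
 * of this file):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */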
145b0c632dbSHeiko Carstens 
146784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
147b0c632dbSHeiko Carstens {
148d7b0b5ebSCarsten Otte 	int r;
149d7b0b5ebSCarsten Otte 
1502bd0ac4eSCarsten Otte 	switch (ext) {
151d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
152b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15352e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1541efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1551efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1561efd0f59SCarsten Otte #endif
1573c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
15860b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
15914eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
160d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
161fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
162ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16310ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
164c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
165d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
16678599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
167f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1686352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
169d7b0b5ebSCarsten Otte 		r = 1;
170d7b0b5ebSCarsten Otte 		break;
171e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
172e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
173e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
174e726b1bdSChristian Borntraeger 		break;
175e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
176e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
177e1e2e605SNick Wang 		break;
1781526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
179abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1801526bf9cSChristian Borntraeger 		break;
1812bd0ac4eSCarsten Otte 	default:
182d7b0b5ebSCarsten Otte 		r = 0;
183b0c632dbSHeiko Carstens 	}
184d7b0b5ebSCarsten Otte 	return r;
1852bd0ac4eSCarsten Otte }
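/*
 * Userspace discovers the capabilities listed above with the
 * KVM_CHECK_EXTENSION ioctl.  Minimal sketch (assumes a kvm_fd opened from
 * /dev/kvm):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_COW) > 0)
 *		;	/* nonzero here means the machine provides ESOP */
 */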
186b0c632dbSHeiko Carstens 
18715f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18815f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
18915f36ebdSJason J. Herne {
19015f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19115f36ebdSJason J. Herne 	unsigned long address;
19215f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19315f36ebdSJason J. Herne 
19415f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19515f36ebdSJason J. Herne 	/* Loop over all guest pages */
19615f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19715f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
19815f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
19915f36ebdSJason J. Herne 
20015f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20115f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20215f36ebdSJason J. Herne 	}
20315f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20415f36ebdSJason J. Herne }
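/*
 * kvm_s390_sync_dirty_log() walks every guest frame of the memslot,
 * translates each gfn to its host virtual address and transfers the dirty
 * state collected in the gmap (guest address space) into KVM's generic dirty
 * bitmap via mark_page_dirty(), clearing it on the gmap side in the process.
 */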
20515f36ebdSJason J. Herne 
206b0c632dbSHeiko Carstens /* Section: vm related */
207b0c632dbSHeiko Carstens /*
208b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
209b0c632dbSHeiko Carstens  */
210b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
211b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
212b0c632dbSHeiko Carstens {
21315f36ebdSJason J. Herne 	int r;
21415f36ebdSJason J. Herne 	unsigned long n;
21515f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21615f36ebdSJason J. Herne 	int is_dirty = 0;
21715f36ebdSJason J. Herne 
21815f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
21915f36ebdSJason J. Herne 
22015f36ebdSJason J. Herne 	r = -EINVAL;
22115f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22215f36ebdSJason J. Herne 		goto out;
22315f36ebdSJason J. Herne 
22415f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22515f36ebdSJason J. Herne 	r = -ENOENT;
22615f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22715f36ebdSJason J. Herne 		goto out;
22815f36ebdSJason J. Herne 
22915f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23015f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23115f36ebdSJason J. Herne 	if (r)
23215f36ebdSJason J. Herne 		goto out;
23315f36ebdSJason J. Herne 
23415f36ebdSJason J. Herne 	/* Clear the dirty log */
23515f36ebdSJason J. Herne 	if (is_dirty) {
23615f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23715f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23815f36ebdSJason J. Herne 	}
23915f36ebdSJason J. Herne 	r = 0;
24015f36ebdSJason J. Herne out:
24115f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24215f36ebdSJason J. Herne 	return r;
243b0c632dbSHeiko Carstens }
244b0c632dbSHeiko Carstens 
245d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
246d938dc55SCornelia Huck {
247d938dc55SCornelia Huck 	int r;
248d938dc55SCornelia Huck 
249d938dc55SCornelia Huck 	if (cap->flags)
250d938dc55SCornelia Huck 		return -EINVAL;
251d938dc55SCornelia Huck 
252d938dc55SCornelia Huck 	switch (cap->cap) {
25384223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25484223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25584223598SCornelia Huck 		r = 0;
25684223598SCornelia Huck 		break;
257d938dc55SCornelia Huck 	default:
258d938dc55SCornelia Huck 		r = -EINVAL;
259d938dc55SCornelia Huck 		break;
260d938dc55SCornelia Huck 	}
261d938dc55SCornelia Huck 	return r;
262d938dc55SCornelia Huck }
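/*
 * KVM_CAP_S390_IRQCHIP is a VM capability; userspace enables it with
 * KVM_ENABLE_CAP on the VM file descriptor.  Minimal sketch (hypothetical
 * vm_fd obtained from KVM_CREATE_VM):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */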
263d938dc55SCornelia Huck 
2644f718eabSDominik Dingel static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2654f718eabSDominik Dingel {
2664f718eabSDominik Dingel 	int ret;
2674f718eabSDominik Dingel 	unsigned int idx;
2684f718eabSDominik Dingel 	switch (attr->attr) {
2694f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2704f718eabSDominik Dingel 		ret = -EBUSY;
2714f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2724f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2734f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2744f718eabSDominik Dingel 			ret = 0;
2754f718eabSDominik Dingel 		}
2764f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2774f718eabSDominik Dingel 		break;
2784f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
2794f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2804f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
281a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
2824f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
2834f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2844f718eabSDominik Dingel 		ret = 0;
2854f718eabSDominik Dingel 		break;
2864f718eabSDominik Dingel 	default:
2874f718eabSDominik Dingel 		ret = -ENXIO;
2884f718eabSDominik Dingel 		break;
2894f718eabSDominik Dingel 	}
2904f718eabSDominik Dingel 	return ret;
2914f718eabSDominik Dingel }
2924f718eabSDominik Dingel 
293f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
294f2061656SDominik Dingel {
295f2061656SDominik Dingel 	int ret;
296f2061656SDominik Dingel 
297f2061656SDominik Dingel 	switch (attr->group) {
2984f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
2994f718eabSDominik Dingel 		ret = kvm_s390_mem_control(kvm, attr);
3004f718eabSDominik Dingel 		break;
301f2061656SDominik Dingel 	default:
302f2061656SDominik Dingel 		ret = -ENXIO;
303f2061656SDominik Dingel 		break;
304f2061656SDominik Dingel 	}
305f2061656SDominik Dingel 
306f2061656SDominik Dingel 	return ret;
307f2061656SDominik Dingel }
308f2061656SDominik Dingel 
309f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
310f2061656SDominik Dingel {
311f2061656SDominik Dingel 	return -ENXIO;
312f2061656SDominik Dingel }
313f2061656SDominik Dingel 
314f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
315f2061656SDominik Dingel {
316f2061656SDominik Dingel 	int ret;
317f2061656SDominik Dingel 
318f2061656SDominik Dingel 	switch (attr->group) {
3194f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3204f718eabSDominik Dingel 		switch (attr->attr) {
3214f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3224f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3234f718eabSDominik Dingel 			ret = 0;
3244f718eabSDominik Dingel 			break;
3254f718eabSDominik Dingel 		default:
3264f718eabSDominik Dingel 			ret = -ENXIO;
3274f718eabSDominik Dingel 			break;
3284f718eabSDominik Dingel 		}
3294f718eabSDominik Dingel 		break;
330f2061656SDominik Dingel 	default:
331f2061656SDominik Dingel 		ret = -ENXIO;
332f2061656SDominik Dingel 		break;
333f2061656SDominik Dingel 	}
334f2061656SDominik Dingel 
335f2061656SDominik Dingel 	return ret;
336f2061656SDominik Dingel }
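/*
 * The memory-control attributes above are driven through the VM
 * device-attribute ioctls handled below (KVM_SET_DEVICE_ATTR /
 * KVM_HAS_DEVICE_ATTR).  Minimal sketch for turning on CMMA before any VCPU
 * exists (hypothetical vm_fd):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */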
337f2061656SDominik Dingel 
338b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
339b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
340b0c632dbSHeiko Carstens {
341b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
342b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
343f2061656SDominik Dingel 	struct kvm_device_attr attr;
344b0c632dbSHeiko Carstens 	int r;
345b0c632dbSHeiko Carstens 
346b0c632dbSHeiko Carstens 	switch (ioctl) {
347ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
348ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
349ba5c1e9bSCarsten Otte 
350ba5c1e9bSCarsten Otte 		r = -EFAULT;
351ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
352ba5c1e9bSCarsten Otte 			break;
353ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
354ba5c1e9bSCarsten Otte 		break;
355ba5c1e9bSCarsten Otte 	}
356d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
357d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
358d938dc55SCornelia Huck 		r = -EFAULT;
359d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
360d938dc55SCornelia Huck 			break;
361d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
362d938dc55SCornelia Huck 		break;
363d938dc55SCornelia Huck 	}
36484223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
36584223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
36684223598SCornelia Huck 
36784223598SCornelia Huck 		r = -EINVAL;
36884223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
36984223598SCornelia Huck 			/* Set up dummy routing. */
37084223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
37184223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
37284223598SCornelia Huck 			r = 0;
37384223598SCornelia Huck 		}
37484223598SCornelia Huck 		break;
37584223598SCornelia Huck 	}
376f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
377f2061656SDominik Dingel 		r = -EFAULT;
378f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
379f2061656SDominik Dingel 			break;
380f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
381f2061656SDominik Dingel 		break;
382f2061656SDominik Dingel 	}
383f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
384f2061656SDominik Dingel 		r = -EFAULT;
385f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
386f2061656SDominik Dingel 			break;
387f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
388f2061656SDominik Dingel 		break;
389f2061656SDominik Dingel 	}
390f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
391f2061656SDominik Dingel 		r = -EFAULT;
392f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
393f2061656SDominik Dingel 			break;
394f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
395f2061656SDominik Dingel 		break;
396f2061656SDominik Dingel 	}
397b0c632dbSHeiko Carstens 	default:
398367e1319SAvi Kivity 		r = -ENOTTY;
399b0c632dbSHeiko Carstens 	}
400b0c632dbSHeiko Carstens 
401b0c632dbSHeiko Carstens 	return r;
402b0c632dbSHeiko Carstens }
403b0c632dbSHeiko Carstens 
4045102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
4055102ee87STony Krowiak {
4065102ee87STony Krowiak 	if (!test_vfacility(76))
4075102ee87STony Krowiak 		return 0;
4085102ee87STony Krowiak 
4095102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
4105102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
4115102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
4125102ee87STony Krowiak 		return -ENOMEM;
4135102ee87STony Krowiak 
4145102ee87STony Krowiak 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
4155102ee87STony Krowiak 				  CRYCB_FORMAT1;
4165102ee87STony Krowiak 
4175102ee87STony Krowiak 	return 0;
4185102ee87STony Krowiak }
4195102ee87STony Krowiak 
420e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
421b0c632dbSHeiko Carstens {
422b0c632dbSHeiko Carstens 	int rc;
423b0c632dbSHeiko Carstens 	char debug_name[16];
424f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
425b0c632dbSHeiko Carstens 
426e08b9637SCarsten Otte 	rc = -EINVAL;
427e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
428e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
429e08b9637SCarsten Otte 		goto out_err;
430e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
431e08b9637SCarsten Otte 		goto out_err;
432e08b9637SCarsten Otte #else
433e08b9637SCarsten Otte 	if (type)
434e08b9637SCarsten Otte 		goto out_err;
435e08b9637SCarsten Otte #endif
436e08b9637SCarsten Otte 
437b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
438b0c632dbSHeiko Carstens 	if (rc)
439d89f5effSJan Kiszka 		goto out_err;
440b0c632dbSHeiko Carstens 
441b290411aSCarsten Otte 	rc = -ENOMEM;
442b290411aSCarsten Otte 
443b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
444b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
445d89f5effSJan Kiszka 		goto out_err;
446f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
447f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
448f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
449f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
450b0c632dbSHeiko Carstens 
451b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
452b0c632dbSHeiko Carstens 
453b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
454b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
455b0c632dbSHeiko Carstens 		goto out_nodbf;
456b0c632dbSHeiko Carstens 
4575102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
4585102ee87STony Krowiak 		goto out_crypto;
4595102ee87STony Krowiak 
460ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
461ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
4628a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
463a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
464ba5c1e9bSCarsten Otte 
465b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
466b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
467b0c632dbSHeiko Carstens 
468e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
469e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
470e08b9637SCarsten Otte 	} else {
4710349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
472598841caSCarsten Otte 		if (!kvm->arch.gmap)
473598841caSCarsten Otte 			goto out_nogmap;
4742c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
47524eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
476e08b9637SCarsten Otte 	}
477fa6b7fe9SCornelia Huck 
478fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
47984223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
480fa6b7fe9SCornelia Huck 
4818ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
4828ad35755SDavid Hildenbrand 
483d89f5effSJan Kiszka 	return 0;
484598841caSCarsten Otte out_nogmap:
4855102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
4865102ee87STony Krowiak out_crypto:
487598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
488b0c632dbSHeiko Carstens out_nodbf:
489b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
490d89f5effSJan Kiszka out_err:
491d89f5effSJan Kiszka 	return rc;
492b0c632dbSHeiko Carstens }
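/*
 * kvm_arch_init_vm() above allocates the per-VM pieces: the SCA (system
 * control area) page that links the VCPUs' SIE blocks, a "kvm-<pid>" s390
 * debug feature, the crypto control block (only if facility 76 is available),
 * the floating interrupt list and, for non-ucontrol guests, a gmap covering
 * at most 2^44 bytes of guest memory.  The error path unwinds these
 * allocations in reverse order.
 */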
493b0c632dbSHeiko Carstens 
494d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
495d329c035SChristian Borntraeger {
496d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
497ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
49867335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
4993c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
50058f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
50158f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
50258f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
503abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
504abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
505abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
50658f9460bSCarsten Otte 	}
507abf4a71eSCarsten Otte 	smp_mb();
50827e0393fSCarsten Otte 
50927e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
51027e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
51127e0393fSCarsten Otte 
512b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
513b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
514d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
515b31288faSKonstantin Weitz 
5166692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
517b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
518d329c035SChristian Borntraeger }
519d329c035SChristian Borntraeger 
520d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
521d329c035SChristian Borntraeger {
522d329c035SChristian Borntraeger 	unsigned int i;
523988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
524d329c035SChristian Borntraeger 
525988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
526988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
527988a2caeSGleb Natapov 
528988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
529988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
530d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
531988a2caeSGleb Natapov 
532988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
533988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
534d329c035SChristian Borntraeger }
535d329c035SChristian Borntraeger 
536b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
537b0c632dbSHeiko Carstens {
538d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
539b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
540d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
5415102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
54227e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
543598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
544841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
54567335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
546b0c632dbSHeiko Carstens }
547b0c632dbSHeiko Carstens 
548b0c632dbSHeiko Carstens /* Section: vcpu related */
549b0c632dbSHeiko Carstens int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
550b0c632dbSHeiko Carstens {
5513c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5523c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
55327e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm)) {
554c6c956b8SMartin Schwidefsky 		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
55527e0393fSCarsten Otte 		if (!vcpu->arch.gmap)
55627e0393fSCarsten Otte 			return -ENOMEM;
5572c70fe44SChristian Borntraeger 		vcpu->arch.gmap->private = vcpu->kvm;
55827e0393fSCarsten Otte 		return 0;
55927e0393fSCarsten Otte 	}
56027e0393fSCarsten Otte 
561598841caSCarsten Otte 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
56259674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
56359674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
5649eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
565b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
566b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
567b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
568b0c632dbSHeiko Carstens 	return 0;
569b0c632dbSHeiko Carstens }
570b0c632dbSHeiko Carstens 
571b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
572b0c632dbSHeiko Carstens {
5734725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5744725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
575b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
5764725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5774725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
57859674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
579480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
5809e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
581b0c632dbSHeiko Carstens }
582b0c632dbSHeiko Carstens 
583b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
584b0c632dbSHeiko Carstens {
5859e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
586480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
5874725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5884725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
58959674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
5904725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5914725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
592b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
593b0c632dbSHeiko Carstens }
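/*
 * kvm_arch_vcpu_load()/kvm_arch_vcpu_put() implement the host<->guest context
 * switch for lazily switched state: floating point control and registers plus
 * access registers are swapped between the host and guest copies, the guest
 * address space (gmap) is enabled or disabled, and CPUSTAT_RUNNING is set or
 * cleared in the SIE control block.
 */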
594b0c632dbSHeiko Carstens 
595b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
596b0c632dbSHeiko Carstens {
597b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
598b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
599b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
6008d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
601b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
602b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
603b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
604b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
605b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
606b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
607b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
608b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
609b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
610672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
6113c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
6123c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
6136352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
6146852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
6152ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
616b0c632dbSHeiko Carstens }
617b0c632dbSHeiko Carstens 
618*31928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
61942897d86SMarcelo Tosatti {
62042897d86SMarcelo Tosatti }
62142897d86SMarcelo Tosatti 
6225102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
6235102ee87STony Krowiak {
6245102ee87STony Krowiak 	if (!test_vfacility(76))
6255102ee87STony Krowiak 		return;
6265102ee87STony Krowiak 
6275102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
6285102ee87STony Krowiak }
6295102ee87STony Krowiak 
630b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
631b31605c1SDominik Dingel {
632b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
633b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
634b31605c1SDominik Dingel }
635b31605c1SDominik Dingel 
636b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
637b31605c1SDominik Dingel {
638b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
639b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
640b31605c1SDominik Dingel 		return -ENOMEM;
641b31605c1SDominik Dingel 
642b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
643b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
644b31605c1SDominik Dingel 	return 0;
645b31605c1SDominik Dingel }
646b31605c1SDominik Dingel 
647b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
648b0c632dbSHeiko Carstens {
649b31605c1SDominik Dingel 	int rc = 0;
650b31288faSKonstantin Weitz 
6519e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
6529e6dabefSCornelia Huck 						    CPUSTAT_SM |
65369d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
65469d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
655fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
6567feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
6577feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
6587feb6bb8SMichael Mueller 
65969d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
6604953919fSDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xD1002000U;
661217a4406SHeiko Carstens 	if (sclp_has_siif())
662217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
66378c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
6645a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
6655a5e6536SMatthew Rosato 				      ICTL_TPROT;
6665a5e6536SMatthew Rosato 
667b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
668b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
669b31605c1SDominik Dingel 		if (rc)
670b31605c1SDominik Dingel 			return rc;
671b31288faSKonstantin Weitz 	}
672ca872302SChristian Borntraeger 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
673ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
674453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
67592e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
6765102ee87STony Krowiak 
6775102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
6785102ee87STony Krowiak 
679b31605c1SDominik Dingel 	return rc;
680b0c632dbSHeiko Carstens }
681b0c632dbSHeiko Carstens 
682b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
683b0c632dbSHeiko Carstens 				      unsigned int id)
684b0c632dbSHeiko Carstens {
6854d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
6867feb6bb8SMichael Mueller 	struct sie_page *sie_page;
6874d47555aSCarsten Otte 	int rc = -EINVAL;
688b0c632dbSHeiko Carstens 
6894d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
6904d47555aSCarsten Otte 		goto out;
6914d47555aSCarsten Otte 
6924d47555aSCarsten Otte 	rc = -ENOMEM;
6934d47555aSCarsten Otte 
694b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
695b0c632dbSHeiko Carstens 	if (!vcpu)
6964d47555aSCarsten Otte 		goto out;
697b0c632dbSHeiko Carstens 
6987feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
6997feb6bb8SMichael Mueller 	if (!sie_page)
700b0c632dbSHeiko Carstens 		goto out_free_cpu;
701b0c632dbSHeiko Carstens 
7027feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
7037feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
7047feb6bb8SMichael Mueller 
705b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
70658f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
70758f9460bSCarsten Otte 		if (!kvm->arch.sca) {
70858f9460bSCarsten Otte 			WARN_ON_ONCE(1);
70958f9460bSCarsten Otte 			goto out_free_cpu;
71058f9460bSCarsten Otte 		}
711abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
71258f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
71358f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
71458f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
71558f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
716b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
717fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
71858f9460bSCarsten Otte 	}
719b0c632dbSHeiko Carstens 
720ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
721ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
722d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
7235288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
724ba5c1e9bSCarsten Otte 
725b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
726b0c632dbSHeiko Carstens 	if (rc)
7277b06bf2fSWei Yongjun 		goto out_free_sie_block;
728b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
729b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
730ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
731b0c632dbSHeiko Carstens 
732b0c632dbSHeiko Carstens 	return vcpu;
7337b06bf2fSWei Yongjun out_free_sie_block:
7347b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
735b0c632dbSHeiko Carstens out_free_cpu:
736b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
7374d47555aSCarsten Otte out:
738b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
739b0c632dbSHeiko Carstens }
740b0c632dbSHeiko Carstens 
741b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
742b0c632dbSHeiko Carstens {
743f87618e8SMichael Mueller 	return kvm_cpu_has_interrupt(vcpu);
744b0c632dbSHeiko Carstens }
745b0c632dbSHeiko Carstens 
74649b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
74749b99e1eSChristian Borntraeger {
74849b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
74949b99e1eSChristian Borntraeger }
75049b99e1eSChristian Borntraeger 
75149b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
75249b99e1eSChristian Borntraeger {
75349b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
75449b99e1eSChristian Borntraeger }
75549b99e1eSChristian Borntraeger 
75649b99e1eSChristian Borntraeger /*
75749b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
75849b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
75949b99e1eSChristian Borntraeger  * return immediately. */
76049b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
76149b99e1eSChristian Borntraeger {
76249b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
76349b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
76449b99e1eSChristian Borntraeger 		cpu_relax();
76549b99e1eSChristian Borntraeger }
76649b99e1eSChristian Borntraeger 
76749b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
76849b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
76949b99e1eSChristian Borntraeger {
77049b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
77149b99e1eSChristian Borntraeger 	exit_sie(vcpu);
77249b99e1eSChristian Borntraeger }
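/*
 * Blocking/kicking protocol used above: s390_vcpu_block() sets PROG_BLOCK_SIE
 * so the VCPU cannot re-enter SIE, exit_sie() raises a stop interrupt request
 * in the SIE control block and busy-waits until the PROG_IN_SIE bit in prog0c
 * is gone, i.e. until the CPU has really left SIE.  exit_sie_sync() combines
 * both to keep a VCPU out of SIE until s390_vcpu_unblock() is called.
 */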
77349b99e1eSChristian Borntraeger 
7742c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
7752c70fe44SChristian Borntraeger {
7762c70fe44SChristian Borntraeger 	int i;
7772c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
7782c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
7792c70fe44SChristian Borntraeger 
7802c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
7812c70fe44SChristian Borntraeger 		/* match against both prefix pages */
782fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
7832c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
7842c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
7852c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
7862c70fe44SChristian Borntraeger 		}
7872c70fe44SChristian Borntraeger 	}
7882c70fe44SChristian Borntraeger }
7892c70fe44SChristian Borntraeger 
790b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
791b6d33834SChristoffer Dall {
792b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
793b6d33834SChristoffer Dall 	BUG();
794b6d33834SChristoffer Dall 	return 0;
795b6d33834SChristoffer Dall }
796b6d33834SChristoffer Dall 
79714eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
79814eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
79914eebd91SCarsten Otte {
80014eebd91SCarsten Otte 	int r = -EINVAL;
80114eebd91SCarsten Otte 
80214eebd91SCarsten Otte 	switch (reg->id) {
80329b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
80429b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
80529b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
80629b7c71bSCarsten Otte 		break;
80729b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
80829b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
80929b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
81029b7c71bSCarsten Otte 		break;
81146a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
81246a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
81346a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
81446a6dd1cSJason J. herne 		break;
81546a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
81646a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
81746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
81846a6dd1cSJason J. herne 		break;
819536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
820536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
821536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
822536336c2SDominik Dingel 		break;
823536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
824536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
825536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
826536336c2SDominik Dingel 		break;
827536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
828536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
829536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
830536336c2SDominik Dingel 		break;
831672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
832672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
833672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
834672550fbSChristian Borntraeger 		break;
835afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
836afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
837afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
838afa45ff5SChristian Borntraeger 		break;
83914eebd91SCarsten Otte 	default:
84014eebd91SCarsten Otte 		break;
84114eebd91SCarsten Otte 	}
84214eebd91SCarsten Otte 
84314eebd91SCarsten Otte 	return r;
84414eebd91SCarsten Otte }
84514eebd91SCarsten Otte 
84614eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
84714eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
84814eebd91SCarsten Otte {
84914eebd91SCarsten Otte 	int r = -EINVAL;
85014eebd91SCarsten Otte 
85114eebd91SCarsten Otte 	switch (reg->id) {
85229b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
85329b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
85429b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
85529b7c71bSCarsten Otte 		break;
85629b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
85729b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
85829b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
85929b7c71bSCarsten Otte 		break;
86046a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
86146a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
86246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
86346a6dd1cSJason J. herne 		break;
86446a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
86546a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
86646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
86746a6dd1cSJason J. herne 		break;
868536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
869536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
870536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
871536336c2SDominik Dingel 		break;
872536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
873536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
874536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
875536336c2SDominik Dingel 		break;
876536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
877536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
878536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
879536336c2SDominik Dingel 		break;
880672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
881672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
882672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
883672550fbSChristian Borntraeger 		break;
884afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
885afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
886afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
887afa45ff5SChristian Borntraeger 		break;
88814eebd91SCarsten Otte 	default:
88914eebd91SCarsten Otte 		break;
89014eebd91SCarsten Otte 	}
89114eebd91SCarsten Otte 
89214eebd91SCarsten Otte 	return r;
89314eebd91SCarsten Otte }
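/*
 * The registers above are accessed with the generic ONE_REG interface on the
 * VCPU file descriptor.  Minimal sketch reading the TOD epoch difference
 * (hypothetical vcpu_fd and local variable):
 *
 *	__u64 epoch;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_EPOCHDIFF,
 *		.addr = (__u64)&epoch,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */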
894b6d33834SChristoffer Dall 
895b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
896b0c632dbSHeiko Carstens {
897b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
898b0c632dbSHeiko Carstens 	return 0;
899b0c632dbSHeiko Carstens }
900b0c632dbSHeiko Carstens 
901b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
902b0c632dbSHeiko Carstens {
9035a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
904b0c632dbSHeiko Carstens 	return 0;
905b0c632dbSHeiko Carstens }
906b0c632dbSHeiko Carstens 
907b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
908b0c632dbSHeiko Carstens {
9095a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
910b0c632dbSHeiko Carstens 	return 0;
911b0c632dbSHeiko Carstens }
912b0c632dbSHeiko Carstens 
913b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
914b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
915b0c632dbSHeiko Carstens {
91659674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
917b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
91859674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
919b0c632dbSHeiko Carstens 	return 0;
920b0c632dbSHeiko Carstens }
921b0c632dbSHeiko Carstens 
922b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
923b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
924b0c632dbSHeiko Carstens {
92559674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
926b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
927b0c632dbSHeiko Carstens 	return 0;
928b0c632dbSHeiko Carstens }
929b0c632dbSHeiko Carstens 
930b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
931b0c632dbSHeiko Carstens {
9324725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
9334725c860SMartin Schwidefsky 		return -EINVAL;
934b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
9354725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
9364725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
9374725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
938b0c632dbSHeiko Carstens 	return 0;
939b0c632dbSHeiko Carstens }
940b0c632dbSHeiko Carstens 
941b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
942b0c632dbSHeiko Carstens {
943b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
944b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
945b0c632dbSHeiko Carstens 	return 0;
946b0c632dbSHeiko Carstens }
947b0c632dbSHeiko Carstens 
948b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
949b0c632dbSHeiko Carstens {
950b0c632dbSHeiko Carstens 	int rc = 0;
951b0c632dbSHeiko Carstens 
9527a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
953b0c632dbSHeiko Carstens 		rc = -EBUSY;
954d7b0b5ebSCarsten Otte 	else {
955d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
956d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
957d7b0b5ebSCarsten Otte 	}
958b0c632dbSHeiko Carstens 	return rc;
959b0c632dbSHeiko Carstens }
960b0c632dbSHeiko Carstens 
961b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
962b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
963b0c632dbSHeiko Carstens {
964b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
965b0c632dbSHeiko Carstens }
966b0c632dbSHeiko Carstens 
96727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
96827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
96927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
97027291e21SDavid Hildenbrand 
971d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
972d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
973b0c632dbSHeiko Carstens {
97427291e21SDavid Hildenbrand 	int rc = 0;
97527291e21SDavid Hildenbrand 
97627291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
97727291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
97827291e21SDavid Hildenbrand 
9792de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
98027291e21SDavid Hildenbrand 		return -EINVAL;
98127291e21SDavid Hildenbrand 
98227291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
98327291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
98427291e21SDavid Hildenbrand 		/* enforce guest PER */
98527291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
98627291e21SDavid Hildenbrand 
98727291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
98827291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
98927291e21SDavid Hildenbrand 	} else {
99027291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
99127291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
99227291e21SDavid Hildenbrand 	}
99327291e21SDavid Hildenbrand 
99427291e21SDavid Hildenbrand 	if (rc) {
99527291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
99627291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
99727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
99827291e21SDavid Hildenbrand 	}
99927291e21SDavid Hildenbrand 
100027291e21SDavid Hildenbrand 	return rc;
1001b0c632dbSHeiko Carstens }
1002b0c632dbSHeiko Carstens 
100362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
100462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
100562d9f0dbSMarcelo Tosatti {
10066352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
10076352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
10086352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
100962d9f0dbSMarcelo Tosatti }
101062d9f0dbSMarcelo Tosatti 
101162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
101262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
101362d9f0dbSMarcelo Tosatti {
10146352e4d2SDavid Hildenbrand 	int rc = 0;
10156352e4d2SDavid Hildenbrand 
10166352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
10176352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
10186352e4d2SDavid Hildenbrand 
10196352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
10206352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
10216352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
10226352e4d2SDavid Hildenbrand 		break;
10236352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
10246352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
10256352e4d2SDavid Hildenbrand 		break;
10266352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
10276352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
10286352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
10296352e4d2SDavid Hildenbrand 	default:
10306352e4d2SDavid Hildenbrand 		rc = -ENXIO;
10316352e4d2SDavid Hildenbrand 	}
10326352e4d2SDavid Hildenbrand 
10336352e4d2SDavid Hildenbrand 	return rc;
103462d9f0dbSMarcelo Tosatti }
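/*
 * Note that setting any MP state flips user_cpu_state_ctrl to 1: from that
 * point on userspace owns the stopped/operating state of the VCPUs and, for
 * example, kvm_s390_vcpu_initial_reset() above no longer stops the CPU on its
 * own.
 */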
103562d9f0dbSMarcelo Tosatti 
1036b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1037b31605c1SDominik Dingel {
1038b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1039b31605c1SDominik Dingel 		return false;
1040b31605c1SDominik Dingel 	/* only enable for z10 and later */
1041b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1042b31605c1SDominik Dingel 		return false;
1043b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1044b31605c1SDominik Dingel 		return false;
1045b31605c1SDominik Dingel 	return true;
1046b31605c1SDominik Dingel }
1047b31605c1SDominik Dingel 
10488ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
10498ad35755SDavid Hildenbrand {
10508ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
10518ad35755SDavid Hildenbrand }
10528ad35755SDavid Hildenbrand 
10532c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
10542c70fe44SChristian Borntraeger {
10558ad35755SDavid Hildenbrand retry:
10568ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
10572c70fe44SChristian Borntraeger 	/*
10582c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
10592c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
10602c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
10612c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
10622c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
10632c70fe44SChristian Borntraeger 	 */
10648ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
10652c70fe44SChristian Borntraeger 		int rc;
10662c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1067fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
10682c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
10692c70fe44SChristian Borntraeger 		if (rc)
10702c70fe44SChristian Borntraeger 			return rc;
10718ad35755SDavid Hildenbrand 		goto retry;
10722c70fe44SChristian Borntraeger 	}
10738ad35755SDavid Hildenbrand 
1074d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1075d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1076d3d692c8SDavid Hildenbrand 		goto retry;
1077d3d692c8SDavid Hildenbrand 	}
1078d3d692c8SDavid Hildenbrand 
10798ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
10808ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
10818ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
10828ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
10838ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
10848ad35755SDavid Hildenbrand 		}
10858ad35755SDavid Hildenbrand 		goto retry;
10868ad35755SDavid Hildenbrand 	}
10878ad35755SDavid Hildenbrand 
10888ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
10898ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
10908ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
10918ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
10928ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
10938ad35755SDavid Hildenbrand 		}
10948ad35755SDavid Hildenbrand 		goto retry;
10958ad35755SDavid Hildenbrand 	}
10968ad35755SDavid Hildenbrand 
10970759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
10980759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
10990759d068SDavid Hildenbrand 
11002c70fe44SChristian Borntraeger 	return 0;
11012c70fe44SChristian Borntraeger }
11022c70fe44SChristian Borntraeger 
1103fa576c58SThomas Huth /**
1104fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1105fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1106fa576c58SThomas Huth  * @gpa: Guest physical address
1107fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1108fa576c58SThomas Huth  *
1109fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1110fa576c58SThomas Huth  *
1111fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1112fa576c58SThomas Huth  */
1113fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
111424eb3a82SDominik Dingel {
1115527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1116527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
111724eb3a82SDominik Dingel }
111824eb3a82SDominik Dingel 
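/*
 * Inject the pseudo-page-fault notification for @token: PFAULT_INIT is
 * delivered to the vcpu when an async fault is started, PFAULT_DONE is
 * injected into the VM once the page has been made available.
 */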
11193c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
11203c038e6bSDominik Dingel 				      unsigned long token)
11213c038e6bSDominik Dingel {
11223c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1123383d0b05SJens Freimann 	struct kvm_s390_irq irq;
11243c038e6bSDominik Dingel 
11253c038e6bSDominik Dingel 	if (start_token) {
1126383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1127383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1128383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
11293c038e6bSDominik Dingel 	} else {
11303c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1131383d0b05SJens Freimann 		inti.parm64 = token;
11323c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
11333c038e6bSDominik Dingel 	}
11343c038e6bSDominik Dingel }
11353c038e6bSDominik Dingel 
11363c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
11373c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
11383c038e6bSDominik Dingel {
11393c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
11403c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
11413c038e6bSDominik Dingel }
11423c038e6bSDominik Dingel 
11433c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
11443c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
11453c038e6bSDominik Dingel {
11463c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
11473c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
11483c038e6bSDominik Dingel }
11493c038e6bSDominik Dingel 
11503c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
11513c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
11523c038e6bSDominik Dingel {
11533c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
11543c038e6bSDominik Dingel }
11553c038e6bSDominik Dingel 
11563c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
11573c038e6bSDominik Dingel {
11583c038e6bSDominik Dingel 	/*
11593c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
11603c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
11613c038e6bSDominik Dingel 	 */
11623c038e6bSDominik Dingel 	return true;
11633c038e6bSDominik Dingel }
11643c038e6bSDominik Dingel 
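/*
 * Decide whether the current host fault may be handled asynchronously.
 * This requires that the guest has set up pfault handling (valid token,
 * PSW mask matching the configured select/compare values, external
 * interrupts and the corresponding CR0 subclass bit enabled) and that
 * pfault is enabled for the gmap; otherwise the caller falls back to a
 * synchronous fault-in.
 */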
11653c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
11663c038e6bSDominik Dingel {
11673c038e6bSDominik Dingel 	hva_t hva;
11683c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
11693c038e6bSDominik Dingel 	int rc;
11703c038e6bSDominik Dingel 
11713c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
11723c038e6bSDominik Dingel 		return 0;
11733c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
11743c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
11753c038e6bSDominik Dingel 		return 0;
11763c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
11773c038e6bSDominik Dingel 		return 0;
11783c038e6bSDominik Dingel 	if (kvm_cpu_has_interrupt(vcpu))
11793c038e6bSDominik Dingel 		return 0;
11803c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
11813c038e6bSDominik Dingel 		return 0;
11823c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
11833c038e6bSDominik Dingel 		return 0;
11843c038e6bSDominik Dingel 
118581480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
118681480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
118781480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
11883c038e6bSDominik Dingel 		return 0;
11893c038e6bSDominik Dingel 
11903c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
11913c038e6bSDominik Dingel 	return rc;
11923c038e6bSDominik Dingel }
11933c038e6bSDominik Dingel 
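/*
 * Preparation for each SIE entry: complete async pfault housekeeping,
 * reschedule and handle pending machine checks if necessary, deliver
 * pending interrupts (unless this is a ucontrol VM), process vcpu
 * requests and, with guest debugging active, patch the PER registers.
 */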
11943fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1195b0c632dbSHeiko Carstens {
11963fb4c40fSThomas Huth 	int rc, cpuflags;
1197e168bf8dSCarsten Otte 
11983c038e6bSDominik Dingel 	/*
11993c038e6bSDominik Dingel 	 * On s390, notifications for arriving pages will be delivered directly
12003c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
12013c038e6bSDominik Dingel 	 * handled outside the worker.
12023c038e6bSDominik Dingel 	 */
12033c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
12043c038e6bSDominik Dingel 
12055a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1206b0c632dbSHeiko Carstens 
1207b0c632dbSHeiko Carstens 	if (need_resched())
1208b0c632dbSHeiko Carstens 		schedule();
1209b0c632dbSHeiko Carstens 
1210d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
121171cde587SChristian Borntraeger 		s390_handle_mcck();
121271cde587SChristian Borntraeger 
121379395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
121479395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
121579395031SJens Freimann 		if (rc)
121679395031SJens Freimann 			return rc;
121779395031SJens Freimann 	}
12180ff31867SCarsten Otte 
12192c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
12202c70fe44SChristian Borntraeger 	if (rc)
12212c70fe44SChristian Borntraeger 		return rc;
12222c70fe44SChristian Borntraeger 
122327291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
122427291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
122527291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
122627291e21SDavid Hildenbrand 	}
122727291e21SDavid Hildenbrand 
1228b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
12293fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
12303fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
12313fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
12322b29a9fdSDominik Dingel 
12333fb4c40fSThomas Huth 	return 0;
12343fb4c40fSThomas Huth }
12353fb4c40fSThomas Huth 
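/*
 * Post-processing after SIE exits: a non-negative exit_reason means an
 * intercept that may be handled in-kernel, ucontrol VMs get -EREMOTE with
 * a prepared kvm_run, guest page faults are resolved asynchronously or
 * synchronously, and a fault in the SIE instruction itself is reported to
 * the guest as an addressing exception.
 */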
12363fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
12373fb4c40fSThomas Huth {
123824eb3a82SDominik Dingel 	int rc = -1;
12392b29a9fdSDominik Dingel 
12402b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
12412b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
12422b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
12432b29a9fdSDominik Dingel 
124427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
124527291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
124627291e21SDavid Hildenbrand 
12473fb4c40fSThomas Huth 	if (exit_reason >= 0) {
12487c470539SMartin Schwidefsky 		rc = 0;
1249210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1250210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1251210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1252210b1607SThomas Huth 						current->thread.gmap_addr;
1253210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1254210b1607SThomas Huth 		rc = -EREMOTE;
125524eb3a82SDominik Dingel 
125624eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
12573c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
125824eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1259fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
126024eb3a82SDominik Dingel 			rc = 0;
1261fa576c58SThomas Huth 		} else {
1262fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1263fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1264fa576c58SThomas Huth 		}
126524eb3a82SDominik Dingel 	}
126624eb3a82SDominik Dingel 
126724eb3a82SDominik Dingel 	if (rc == -1) {
1268699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1269699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1270699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
12711f0d0f09SCarsten Otte 	}
1272b0c632dbSHeiko Carstens 
12735a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
12743fb4c40fSThomas Huth 
1275a76ccff6SThomas Huth 	if (rc == 0) {
1276a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
12772955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
12782955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1279a76ccff6SThomas Huth 		else
1280a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1281a76ccff6SThomas Huth 	}
1282a76ccff6SThomas Huth 
12833fb4c40fSThomas Huth 	return rc;
12843fb4c40fSThomas Huth }
12853fb4c40fSThomas Huth 
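/*
 * The inner run loop: keep entering SIE via sie64a() until pre/post
 * processing reports an error, a signal is pending or a guest debug exit
 * has been requested.
 */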
12863fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
12873fb4c40fSThomas Huth {
12883fb4c40fSThomas Huth 	int rc, exit_reason;
12893fb4c40fSThomas Huth 
1290800c1065SThomas Huth 	/*
1291800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1292800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1293800c1065SThomas Huth 	 */
1294800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1295800c1065SThomas Huth 
1296a76ccff6SThomas Huth 	do {
12973fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
12983fb4c40fSThomas Huth 		if (rc)
1299a76ccff6SThomas Huth 			break;
13003fb4c40fSThomas Huth 
1301800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
13023fb4c40fSThomas Huth 		/*
1303a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must
1304a76ccff6SThomas Huth 		 * be no uaccess between guest_enter and guest_exit.
13053fb4c40fSThomas Huth 		 */
13063fb4c40fSThomas Huth 		preempt_disable();
13073fb4c40fSThomas Huth 		kvm_guest_enter();
13083fb4c40fSThomas Huth 		preempt_enable();
1309a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1310a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
13113fb4c40fSThomas Huth 		kvm_guest_exit();
1312800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
13133fb4c40fSThomas Huth 
13143fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
131527291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
13163fb4c40fSThomas Huth 
1317800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1318e168bf8dSCarsten Otte 	return rc;
1319b0c632dbSHeiko Carstens }
1320b0c632dbSHeiko Carstens 
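/*
 * Copy the register state that user space marked dirty in kvm_run into
 * the vcpu/SIE control block before entering the guest.  Control register
 * changes force a TLB flush request.
 */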
1321b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1322b028ee3eSDavid Hildenbrand {
1323b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1324b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1325b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1326b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1327b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1328b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1329d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1330d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1331b028ee3eSDavid Hildenbrand 	}
1332b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1333b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1334b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1335b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1336b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1337b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1338b028ee3eSDavid Hildenbrand 	}
1339b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1340b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1341b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1342b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
1343b028ee3eSDavid Hildenbrand 	}
1344b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1345b028ee3eSDavid Hildenbrand }
1346b028ee3eSDavid Hildenbrand 
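/* Mirror the current guest state back into kvm_run for user space. */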
1347b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1348b028ee3eSDavid Hildenbrand {
1349b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1350b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1351b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1352b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1353b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1354b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1355b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1356b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1357b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1358b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1359b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1360b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1361b028ee3eSDavid Hildenbrand }
1362b028ee3eSDavid Hildenbrand 
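/*
 * Main KVM_RUN handler: synchronize registers from kvm_run, loop in
 * __vcpu_run() until user space interaction is required, then translate
 * the result into an exit reason and write the registers back.
 */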
1363b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1364b0c632dbSHeiko Carstens {
13658f2abe6aSChristian Borntraeger 	int rc;
1366b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1367b0c632dbSHeiko Carstens 
136827291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
136927291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
137027291e21SDavid Hildenbrand 		return 0;
137127291e21SDavid Hildenbrand 	}
137227291e21SDavid Hildenbrand 
1373b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1374b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1375b0c632dbSHeiko Carstens 
13766352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
13776852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
13786352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
13796352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
13806352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
13816352e4d2SDavid Hildenbrand 		return -EINVAL;
13826352e4d2SDavid Hildenbrand 	}
1383b0c632dbSHeiko Carstens 
1384b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1385d7b0b5ebSCarsten Otte 
1386dab4079dSHeiko Carstens 	might_fault();
1387e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
13889ace903dSChristian Ehrhardt 
1389b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1390b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
13918f2abe6aSChristian Borntraeger 		rc = -EINTR;
1392b1d16c49SChristian Ehrhardt 	}
13938f2abe6aSChristian Borntraeger 
139427291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
139527291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
139627291e21SDavid Hildenbrand 		rc = 0;
139727291e21SDavid Hildenbrand 	}
139827291e21SDavid Hildenbrand 
1399b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
14008f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
14018f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
14028f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
14038f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
14048f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
14058f2abe6aSChristian Borntraeger 		rc = 0;
14068f2abe6aSChristian Borntraeger 	}
14078f2abe6aSChristian Borntraeger 
14088f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
14098f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
14108f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
14118f2abe6aSChristian Borntraeger 		rc = 0;
14128f2abe6aSChristian Borntraeger 	}
14138f2abe6aSChristian Borntraeger 
1414b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1415d7b0b5ebSCarsten Otte 
1416b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1417b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1418b0c632dbSHeiko Carstens 
1419b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
14207e8e6ab4SHeiko Carstens 	return rc;
1421b0c632dbSHeiko Carstens }
1422b0c632dbSHeiko Carstens 
1423b0c632dbSHeiko Carstens /*
1424b0c632dbSHeiko Carstens  * store status at address
1425b0c632dbSHeiko Carstens  * we have two special cases:
1426b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1427b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1428b0c632dbSHeiko Carstens  */
1429d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1430b0c632dbSHeiko Carstens {
1431092670cdSCarsten Otte 	unsigned char archmode = 1;
1432fda902cbSMichael Mueller 	unsigned int px;
1433178bd789SThomas Huth 	u64 clkcomp;
1434d0bce605SHeiko Carstens 	int rc;
1435b0c632dbSHeiko Carstens 
1436d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1437d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1438b0c632dbSHeiko Carstens 			return -EFAULT;
1439d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1440d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1441d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1442b0c632dbSHeiko Carstens 			return -EFAULT;
1443d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1444d0bce605SHeiko Carstens 	}
1445d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1446d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1447d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1448d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1449d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1450d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1451fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1452d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1453fda902cbSMichael Mueller 			      &px, 4);
1454d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1455d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1456d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1457d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1458d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1459d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1460d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1461178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1462d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1463d0bce605SHeiko Carstens 			      &clkcomp, 8);
1464d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1465d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1466d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1467d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1468d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1469b0c632dbSHeiko Carstens }
1470b0c632dbSHeiko Carstens 
1471e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1472e879892cSThomas Huth {
1473e879892cSThomas Huth 	/*
1474e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1475e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1476e879892cSThomas Huth 	 * them into the save area.
1477e879892cSThomas Huth 	 */
1478e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1479e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1480e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1481e879892cSThomas Huth 
1482e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1483e879892cSThomas Huth }
1484e879892cSThomas Huth 
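/*
 * The ENABLE_IBS/DISABLE_IBS requests are mutually exclusive: raising one
 * clears a pending request for the other and kicks the vcpu out of SIE so
 * that kvm_s390_handle_requests() applies the change before the next
 * guest entry.
 */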
14858ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14868ad35755SDavid Hildenbrand {
14878ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
14888ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
14898ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14908ad35755SDavid Hildenbrand }
14918ad35755SDavid Hildenbrand 
14928ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
14938ad35755SDavid Hildenbrand {
14948ad35755SDavid Hildenbrand 	unsigned int i;
14958ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
14968ad35755SDavid Hildenbrand 
14978ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
14988ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
14998ad35755SDavid Hildenbrand 	}
15008ad35755SDavid Hildenbrand }
15018ad35755SDavid Hildenbrand 
15028ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
15038ad35755SDavid Hildenbrand {
15048ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
15058ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
15068ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
15078ad35755SDavid Hildenbrand }
15088ad35755SDavid Hildenbrand 
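/*
 * Transition a vcpu from the STOPPED to the OPERATING state.  IBS is only
 * kept enabled while exactly one vcpu is running, so starting a second
 * vcpu disables it on all vcpus.
 */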
15096852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
15106852d7b6SDavid Hildenbrand {
15118ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15128ad35755SDavid Hildenbrand 
15138ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
15148ad35755SDavid Hildenbrand 		return;
15158ad35755SDavid Hildenbrand 
15166852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
15178ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1518433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15198ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15208ad35755SDavid Hildenbrand 
15218ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15228ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
15238ad35755SDavid Hildenbrand 			started_vcpus++;
15248ad35755SDavid Hildenbrand 	}
15258ad35755SDavid Hildenbrand 
15268ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
15278ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
15288ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
15298ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
15308ad35755SDavid Hildenbrand 		/*
15318ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
15328ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
15338ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
15348ad35755SDavid Hildenbrand 		 */
15358ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
15368ad35755SDavid Hildenbrand 	}
15378ad35755SDavid Hildenbrand 
15386852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
15398ad35755SDavid Hildenbrand 	/*
15408ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
15418ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
15428ad35755SDavid Hildenbrand 	 */
1543d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1544433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
15458ad35755SDavid Hildenbrand 	return;
15466852d7b6SDavid Hildenbrand }
15476852d7b6SDavid Hildenbrand 
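/*
 * Transition a vcpu from the OPERATING to the STOPPED state, clearing any
 * pending SIGP STOP action bits, and re-enable IBS if only a single
 * running vcpu remains.
 */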
15486852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
15496852d7b6SDavid Hildenbrand {
15508ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15518ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
15528ad35755SDavid Hildenbrand 
15538ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
15548ad35755SDavid Hildenbrand 		return;
15558ad35755SDavid Hildenbrand 
15566852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
15578ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1558433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15598ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15608ad35755SDavid Hildenbrand 
156132f5ff63SDavid Hildenbrand 	/* Need to lock access to action_bits to avoid a SIGP race condition */
15624ae3c081SDavid Hildenbrand 	spin_lock(&vcpu->arch.local_int.lock);
15636852d7b6SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
156432f5ff63SDavid Hildenbrand 
156532f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
156632f5ff63SDavid Hildenbrand 	vcpu->arch.local_int.action_bits &=
156732f5ff63SDavid Hildenbrand 				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
15684ae3c081SDavid Hildenbrand 	spin_unlock(&vcpu->arch.local_int.lock);
156932f5ff63SDavid Hildenbrand 
15708ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
15718ad35755SDavid Hildenbrand 
15728ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15738ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
15748ad35755SDavid Hildenbrand 			started_vcpus++;
15758ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
15768ad35755SDavid Hildenbrand 		}
15778ad35755SDavid Hildenbrand 	}
15788ad35755SDavid Hildenbrand 
15798ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
15808ad35755SDavid Hildenbrand 		/*
15818ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
15828ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
15838ad35755SDavid Hildenbrand 		 */
15848ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
15858ad35755SDavid Hildenbrand 	}
15868ad35755SDavid Hildenbrand 
1587433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
15888ad35755SDavid Hildenbrand 	return;
15896852d7b6SDavid Hildenbrand }
15906852d7b6SDavid Hildenbrand 
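/*
 * Per-vcpu KVM_ENABLE_CAP handler.  Only KVM_CAP_S390_CSS_SUPPORT is
 * accepted here; it marks the whole VM as having its channel subsystem
 * handled by user space.  A minimal sketch of the call from a VMM
 * (vcpu_fd is hypothetical):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */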
1591d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1592d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1593d6712df9SCornelia Huck {
1594d6712df9SCornelia Huck 	int r;
1595d6712df9SCornelia Huck 
1596d6712df9SCornelia Huck 	if (cap->flags)
1597d6712df9SCornelia Huck 		return -EINVAL;
1598d6712df9SCornelia Huck 
1599d6712df9SCornelia Huck 	switch (cap->cap) {
1600fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1601fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1602fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1603fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1604fa6b7fe9SCornelia Huck 		}
1605fa6b7fe9SCornelia Huck 		r = 0;
1606fa6b7fe9SCornelia Huck 		break;
1607d6712df9SCornelia Huck 	default:
1608d6712df9SCornelia Huck 		r = -EINVAL;
1609d6712df9SCornelia Huck 		break;
1610d6712df9SCornelia Huck 	}
1611d6712df9SCornelia Huck 	return r;
1612d6712df9SCornelia Huck }
1613d6712df9SCornelia Huck 
1614b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1615b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1616b0c632dbSHeiko Carstens {
1617b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1618b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1619800c1065SThomas Huth 	int idx;
1620bc923cc9SAvi Kivity 	long r;
1621b0c632dbSHeiko Carstens 
162293736624SAvi Kivity 	switch (ioctl) {
162393736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1624ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1625383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
1626ba5c1e9bSCarsten Otte 
162793736624SAvi Kivity 		r = -EFAULT;
1628ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
162993736624SAvi Kivity 			break;
1630383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
1631383d0b05SJens Freimann 			return -EINVAL;
1632383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
163393736624SAvi Kivity 		break;
1634ba5c1e9bSCarsten Otte 	}
1635b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1636800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1637bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1638800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1639bc923cc9SAvi Kivity 		break;
1640b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1641b0c632dbSHeiko Carstens 		psw_t psw;
1642b0c632dbSHeiko Carstens 
1643bc923cc9SAvi Kivity 		r = -EFAULT;
1644b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1645bc923cc9SAvi Kivity 			break;
1646bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1647bc923cc9SAvi Kivity 		break;
1648b0c632dbSHeiko Carstens 	}
1649b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1650bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1651bc923cc9SAvi Kivity 		break;
165214eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
165314eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
165414eebd91SCarsten Otte 		struct kvm_one_reg reg;
165514eebd91SCarsten Otte 		r = -EFAULT;
165614eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
165714eebd91SCarsten Otte 			break;
165814eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
165914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
166014eebd91SCarsten Otte 		else
166114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
166214eebd91SCarsten Otte 		break;
166314eebd91SCarsten Otte 	}
166427e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
166527e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
166627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
166727e0393fSCarsten Otte 
166827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
166927e0393fSCarsten Otte 			r = -EFAULT;
167027e0393fSCarsten Otte 			break;
167127e0393fSCarsten Otte 		}
167227e0393fSCarsten Otte 
167327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
167427e0393fSCarsten Otte 			r = -EINVAL;
167527e0393fSCarsten Otte 			break;
167627e0393fSCarsten Otte 		}
167727e0393fSCarsten Otte 
167827e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
167927e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
168027e0393fSCarsten Otte 		break;
168127e0393fSCarsten Otte 	}
168227e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
168327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
168427e0393fSCarsten Otte 
168527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
168627e0393fSCarsten Otte 			r = -EFAULT;
168727e0393fSCarsten Otte 			break;
168827e0393fSCarsten Otte 		}
168927e0393fSCarsten Otte 
169027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
169127e0393fSCarsten Otte 			r = -EINVAL;
169227e0393fSCarsten Otte 			break;
169327e0393fSCarsten Otte 		}
169427e0393fSCarsten Otte 
169527e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
169627e0393fSCarsten Otte 			ucasmap.length);
169727e0393fSCarsten Otte 		break;
169827e0393fSCarsten Otte 	}
169927e0393fSCarsten Otte #endif
1700ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1701527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
1702ccc7910fSCarsten Otte 		break;
1703ccc7910fSCarsten Otte 	}
1704d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1705d6712df9SCornelia Huck 	{
1706d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1707d6712df9SCornelia Huck 		r = -EFAULT;
1708d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1709d6712df9SCornelia Huck 			break;
1710d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1711d6712df9SCornelia Huck 		break;
1712d6712df9SCornelia Huck 	}
1713b0c632dbSHeiko Carstens 	default:
17143e6afcf1SCarsten Otte 		r = -ENOTTY;
1715b0c632dbSHeiko Carstens 	}
1716bc923cc9SAvi Kivity 	return r;
1717b0c632dbSHeiko Carstens }
1718b0c632dbSHeiko Carstens 
17195b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
17205b1c1493SCarsten Otte {
17215b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
17225b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
17235b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
17245b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
17255b1c1493SCarsten Otte 		get_page(vmf->page);
17265b1c1493SCarsten Otte 		return 0;
17275b1c1493SCarsten Otte 	}
17285b1c1493SCarsten Otte #endif
17295b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
17305b1c1493SCarsten Otte }
17315b1c1493SCarsten Otte 
17325587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
17335587027cSAneesh Kumar K.V 			    unsigned long npages)
1734db3fe4ebSTakuya Yoshikawa {
1735db3fe4ebSTakuya Yoshikawa 	return 0;
1736db3fe4ebSTakuya Yoshikawa }
1737db3fe4ebSTakuya Yoshikawa 
1738b0c632dbSHeiko Carstens /* Section: memory related */
1739f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1740f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
17417b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
17427b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1743b0c632dbSHeiko Carstens {
1744dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a
1745dd2887e7SNick Wang 	   segment boundary (1MB). The memory in userland may be fragmented
1746dd2887e7SNick Wang 	   into various different vmas. It is okay to mmap() and munmap()
1747dd2887e7SNick Wang 	   stuff in this slot after doing this call at any time. */
1748b0c632dbSHeiko Carstens 
1749598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1750b0c632dbSHeiko Carstens 		return -EINVAL;
1751b0c632dbSHeiko Carstens 
1752598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1753b0c632dbSHeiko Carstens 		return -EINVAL;
1754b0c632dbSHeiko Carstens 
1755f7784b8eSMarcelo Tosatti 	return 0;
1756f7784b8eSMarcelo Tosatti }
1757f7784b8eSMarcelo Tosatti 
1758f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1759f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
17608482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
17618482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1762f7784b8eSMarcelo Tosatti {
1763f7850c92SCarsten Otte 	int rc;
1764f7784b8eSMarcelo Tosatti 
17652cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
17662cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
17672cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
17682cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
17692cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
17702cef4debSChristian Borntraeger 	 */
17712cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
17722cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
17732cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
17742cef4debSChristian Borntraeger 		return;
1775598841caSCarsten Otte 
1776598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1777598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1778598841caSCarsten Otte 	if (rc)
1779f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1780598841caSCarsten Otte 	return;
1781b0c632dbSHeiko Carstens }
1782b0c632dbSHeiko Carstens 
1783b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1784b0c632dbSHeiko Carstens {
1785ef50f7acSChristian Borntraeger 	int ret;
17860ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1787ef50f7acSChristian Borntraeger 	if (ret)
1788ef50f7acSChristian Borntraeger 		return ret;
1789ef50f7acSChristian Borntraeger 
1790ef50f7acSChristian Borntraeger 	/*
1791ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
179225985edcSLucas De Marchi 	 * to hold the maximum amount of facilities. On the other hand, we
1793ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1794ef50f7acSChristian Borntraeger 	 */
179578c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
179678c4b59fSMichael Mueller 	if (!vfacilities) {
1797ef50f7acSChristian Borntraeger 		kvm_exit();
1798ef50f7acSChristian Borntraeger 		return -ENOMEM;
1799ef50f7acSChristian Borntraeger 	}
180078c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
18017be81a46SChristian Borntraeger 	vfacilities[0] &= 0xff82fffbf47c2000UL;
18027feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1803ef50f7acSChristian Borntraeger 	return 0;
1804b0c632dbSHeiko Carstens }
1805b0c632dbSHeiko Carstens 
1806b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1807b0c632dbSHeiko Carstens {
180878c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1809b0c632dbSHeiko Carstens 	kvm_exit();
1810b0c632dbSHeiko Carstens }
1811b0c632dbSHeiko Carstens 
1812b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1813b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1814566af940SCornelia Huck 
1815566af940SCornelia Huck /*
1816566af940SCornelia Huck  * Enable autoloading of the kvm module.
1817566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1818566af940SCornelia Huck  * since x86 takes a different approach.
1819566af940SCornelia Huck  */
1820566af940SCornelia Huck #include <linux/miscdevice.h>
1821566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1822566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1823