xref: /linux/arch/s390/kvm/kvm-s390.c (revision 91520f1af8a01d349d19911238fc3dbed3fa58d2)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25a374e892STony Krowiak #include <linux/random.h>
26b0c632dbSHeiko Carstens #include <linux/slab.h>
27ba5c1e9bSCarsten Otte #include <linux/timer.h>
28cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
29b0c632dbSHeiko Carstens #include <asm/lowcore.h>
30b0c632dbSHeiko Carstens #include <asm/pgtable.h>
31f5daba1dSHeiko Carstens #include <asm/nmi.h>
32a0616cdeSDavid Howells #include <asm/switch_to.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
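/*
 * Per-vcpu counters exported through KVM's generic debugfs statistics
 * interface (typically under /sys/kernel/debug/kvm); each entry maps a
 * file name to the offset of a counter inside struct kvm_vcpu.
 */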
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
54ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
55f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
56ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
58aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
59ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
607697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
61ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
62ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
63ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
66ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
67ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6869d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
69453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
70453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
71453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
72453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
73453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
748a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
75453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
76453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
77b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
78453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
79453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
80bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
815288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
82bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
837697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
875288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8942cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
905288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
925288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9342cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
96388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
97e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9841628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
99b0c632dbSHeiko Carstens 	{ NULL }
100b0c632dbSHeiko Carstens };
101b0c632dbSHeiko Carstens 
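/*
 * Each word of kvm_s390_fac_list_mask[] covers 64 facility bits. During VM
 * creation the mask is ANDed with the host facility list (see
 * kvm_arch_init_vm()), so only facilities that KVM is prepared to
 * virtualize are offered to the guest.
 */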
1029d8d5786SMichael Mueller /* upper facilities limit for kvm */
1039d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = {
1049d8d5786SMichael Mueller 	0xff82fffbf4fc2000UL,
1059d8d5786SMichael Mueller 	0x005c000000000000UL,
1069d8d5786SMichael Mueller };
107b0c632dbSHeiko Carstens 
1089d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
10978c4b59fSMichael Mueller {
1109d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1119d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
11278c4b59fSMichael Mueller }
11378c4b59fSMichael Mueller 
1149d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
1159d8d5786SMichael Mueller 
116b0c632dbSHeiko Carstens /* Section: not file related */
11713a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
118b0c632dbSHeiko Carstens {
119b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
12010474ae8SAlexander Graf 	return 0;
121b0c632dbSHeiko Carstens }
122b0c632dbSHeiko Carstens 
1232c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1242c70fe44SChristian Borntraeger 
125b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
126b0c632dbSHeiko Carstens {
1272c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1282c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
129b0c632dbSHeiko Carstens 	return 0;
130b0c632dbSHeiko Carstens }
131b0c632dbSHeiko Carstens 
132b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
133b0c632dbSHeiko Carstens {
1342c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
135b0c632dbSHeiko Carstens }
136b0c632dbSHeiko Carstens 
137b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
138b0c632dbSHeiko Carstens {
13984877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
14084877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
141b0c632dbSHeiko Carstens }
142b0c632dbSHeiko Carstens 
143b0c632dbSHeiko Carstens /* Section: device related */
144b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
145b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
146b0c632dbSHeiko Carstens {
147b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
148b0c632dbSHeiko Carstens 		return s390_enable_sie();
149b0c632dbSHeiko Carstens 	return -EINVAL;
150b0c632dbSHeiko Carstens }
151b0c632dbSHeiko Carstens 
152784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
153b0c632dbSHeiko Carstens {
154d7b0b5ebSCarsten Otte 	int r;
155d7b0b5ebSCarsten Otte 
1562bd0ac4eSCarsten Otte 	switch (ext) {
157d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
158b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15952e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1601efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1611efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1621efd0f59SCarsten Otte #endif
1633c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16460b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
16514eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
166d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
167fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
168ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16910ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
170c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
171d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
17278599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
173f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1746352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
1752444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
176d7b0b5ebSCarsten Otte 		r = 1;
177d7b0b5ebSCarsten Otte 		break;
178e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
179e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
180e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
181e726b1bdSChristian Borntraeger 		break;
182e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
183e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
184e1e2e605SNick Wang 		break;
1851526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
186abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1871526bf9cSChristian Borntraeger 		break;
1882bd0ac4eSCarsten Otte 	default:
189d7b0b5ebSCarsten Otte 		r = 0;
190b0c632dbSHeiko Carstens 	}
191d7b0b5ebSCarsten Otte 	return r;
1922bd0ac4eSCarsten Otte }
193b0c632dbSHeiko Carstens 
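/*
 * Walk every guest frame of the memslot and transfer the per-page dirty
 * state tracked by the gmap into the KVM dirty bitmap via mark_page_dirty().
 * Called with slots_lock held from kvm_vm_ioctl_get_dirty_log().
 */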
19415f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
19515f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
19615f36ebdSJason J. Herne {
19715f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19815f36ebdSJason J. Herne 	unsigned long address;
19915f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
20015f36ebdSJason J. Herne 
20115f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
20215f36ebdSJason J. Herne 	/* Loop over all guest pages */
20315f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
20415f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
20515f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
20615f36ebdSJason J. Herne 
20715f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20815f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20915f36ebdSJason J. Herne 	}
21015f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
21115f36ebdSJason J. Herne }
21215f36ebdSJason J. Herne 
213b0c632dbSHeiko Carstens /* Section: vm related */
214b0c632dbSHeiko Carstens /*
215b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
216b0c632dbSHeiko Carstens  */
217b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
218b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
219b0c632dbSHeiko Carstens {
22015f36ebdSJason J. Herne 	int r;
22115f36ebdSJason J. Herne 	unsigned long n;
22215f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
22315f36ebdSJason J. Herne 	int is_dirty = 0;
22415f36ebdSJason J. Herne 
22515f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
22615f36ebdSJason J. Herne 
22715f36ebdSJason J. Herne 	r = -EINVAL;
22815f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22915f36ebdSJason J. Herne 		goto out;
23015f36ebdSJason J. Herne 
23115f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
23215f36ebdSJason J. Herne 	r = -ENOENT;
23315f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
23415f36ebdSJason J. Herne 		goto out;
23515f36ebdSJason J. Herne 
23615f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23715f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23815f36ebdSJason J. Herne 	if (r)
23915f36ebdSJason J. Herne 		goto out;
24015f36ebdSJason J. Herne 
24115f36ebdSJason J. Herne 	/* Clear the dirty log */
24215f36ebdSJason J. Herne 	if (is_dirty) {
24315f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
24415f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
24515f36ebdSJason J. Herne 	}
24615f36ebdSJason J. Herne 	r = 0;
24715f36ebdSJason J. Herne out:
24815f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24915f36ebdSJason J. Herne 	return r;
250b0c632dbSHeiko Carstens }
251b0c632dbSHeiko Carstens 
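/*
 * Illustrative userspace flow (a sketch only, error handling omitted):
 * the VMM allocates a bitmap of memslot->npages bits and fetches + clears
 * the dirty state with
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * which ends up in kvm_vm_ioctl_get_dirty_log() above.
 */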
252d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
253d938dc55SCornelia Huck {
254d938dc55SCornelia Huck 	int r;
255d938dc55SCornelia Huck 
256d938dc55SCornelia Huck 	if (cap->flags)
257d938dc55SCornelia Huck 		return -EINVAL;
258d938dc55SCornelia Huck 
259d938dc55SCornelia Huck 	switch (cap->cap) {
26084223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
26184223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
26284223598SCornelia Huck 		r = 0;
26384223598SCornelia Huck 		break;
2642444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
2652444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
2662444b352SDavid Hildenbrand 		r = 0;
2672444b352SDavid Hildenbrand 		break;
268d938dc55SCornelia Huck 	default:
269d938dc55SCornelia Huck 		r = -EINVAL;
270d938dc55SCornelia Huck 		break;
271d938dc55SCornelia Huck 	}
272d938dc55SCornelia Huck 	return r;
273d938dc55SCornelia Huck }
274d938dc55SCornelia Huck 
2758c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2768c0a7ce6SDominik Dingel {
2778c0a7ce6SDominik Dingel 	int ret;
2788c0a7ce6SDominik Dingel 
2798c0a7ce6SDominik Dingel 	switch (attr->attr) {
2808c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2818c0a7ce6SDominik Dingel 		ret = 0;
2828c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2838c0a7ce6SDominik Dingel 			ret = -EFAULT;
2848c0a7ce6SDominik Dingel 		break;
2858c0a7ce6SDominik Dingel 	default:
2868c0a7ce6SDominik Dingel 		ret = -ENXIO;
2878c0a7ce6SDominik Dingel 		break;
2888c0a7ce6SDominik Dingel 	}
2898c0a7ce6SDominik Dingel 	return ret;
2908c0a7ce6SDominik Dingel }
2918c0a7ce6SDominik Dingel 
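/*
 * KVM_S390_VM_MEM_CTRL attributes: ENABLE_CMMA switches on collaborative
 * memory management (only while no vcpu exists yet), CLR_CMMA resets the
 * CMMA state of the guest address space, and LIMIT_SIZE replaces the gmap
 * with one bounded by the requested limit (again only before vcpu creation,
 * and never for ucontrol guests).
 */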
2928c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2934f718eabSDominik Dingel {
2944f718eabSDominik Dingel 	int ret;
2954f718eabSDominik Dingel 	unsigned int idx;
2964f718eabSDominik Dingel 	switch (attr->attr) {
2974f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2984f718eabSDominik Dingel 		ret = -EBUSY;
2994f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3004f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3014f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
3024f718eabSDominik Dingel 			ret = 0;
3034f718eabSDominik Dingel 		}
3044f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3054f718eabSDominik Dingel 		break;
3064f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
3074f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3084f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
309a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
3104f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3114f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3124f718eabSDominik Dingel 		ret = 0;
3134f718eabSDominik Dingel 		break;
3148c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3158c0a7ce6SDominik Dingel 		unsigned long new_limit;
3168c0a7ce6SDominik Dingel 
3178c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3188c0a7ce6SDominik Dingel 			return -EINVAL;
3198c0a7ce6SDominik Dingel 
3208c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3218c0a7ce6SDominik Dingel 			return -EFAULT;
3228c0a7ce6SDominik Dingel 
3238c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3248c0a7ce6SDominik Dingel 			return -E2BIG;
3258c0a7ce6SDominik Dingel 
3268c0a7ce6SDominik Dingel 		ret = -EBUSY;
3278c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3288c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3298c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3308c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3318c0a7ce6SDominik Dingel 
3328c0a7ce6SDominik Dingel 			if (!new) {
3338c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3348c0a7ce6SDominik Dingel 			} else {
3358c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3368c0a7ce6SDominik Dingel 				new->private = kvm;
3378c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3388c0a7ce6SDominik Dingel 				ret = 0;
3398c0a7ce6SDominik Dingel 			}
3408c0a7ce6SDominik Dingel 		}
3418c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3428c0a7ce6SDominik Dingel 		break;
3438c0a7ce6SDominik Dingel 	}
3444f718eabSDominik Dingel 	default:
3454f718eabSDominik Dingel 		ret = -ENXIO;
3464f718eabSDominik Dingel 		break;
3474f718eabSDominik Dingel 	}
3484f718eabSDominik Dingel 	return ret;
3494f718eabSDominik Dingel }
3504f718eabSDominik Dingel 
351a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
352a374e892STony Krowiak 
353a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
354a374e892STony Krowiak {
355a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
356a374e892STony Krowiak 	int i;
357a374e892STony Krowiak 
3589d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
359a374e892STony Krowiak 		return -EINVAL;
360a374e892STony Krowiak 
361a374e892STony Krowiak 	mutex_lock(&kvm->lock);
362a374e892STony Krowiak 	switch (attr->attr) {
363a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
364a374e892STony Krowiak 		get_random_bytes(
365a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
366a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
367a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
368a374e892STony Krowiak 		break;
369a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
370a374e892STony Krowiak 		get_random_bytes(
371a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
372a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
373a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
374a374e892STony Krowiak 		break;
375a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
376a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
377a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
378a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
379a374e892STony Krowiak 		break;
380a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
381a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
382a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
383a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
384a374e892STony Krowiak 		break;
385a374e892STony Krowiak 	default:
386a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
387a374e892STony Krowiak 		return -ENXIO;
388a374e892STony Krowiak 	}
389a374e892STony Krowiak 
390a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
391a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
392a374e892STony Krowiak 		exit_sie(vcpu);
393a374e892STony Krowiak 	}
394a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
395a374e892STony Krowiak 	return 0;
396a374e892STony Krowiak }
397a374e892STony Krowiak 
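/*
 * The guest TOD clock is not stored as an absolute value; kvm->arch.epoch
 * holds the delta between guest and host TOD. Setting the low word stores
 * gtod - host_tod and propagates the new epoch into every vcpu's SIE block.
 * Only a zero high word is accepted.
 */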
39872f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
39972f25020SJason J. Herne {
40072f25020SJason J. Herne 	u8 gtod_high;
40172f25020SJason J. Herne 
40272f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
40372f25020SJason J. Herne 					   sizeof(gtod_high)))
40472f25020SJason J. Herne 		return -EFAULT;
40572f25020SJason J. Herne 
40672f25020SJason J. Herne 	if (gtod_high != 0)
40772f25020SJason J. Herne 		return -EINVAL;
40872f25020SJason J. Herne 
40972f25020SJason J. Herne 	return 0;
41072f25020SJason J. Herne }
41172f25020SJason J. Herne 
41272f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
41372f25020SJason J. Herne {
41472f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
41572f25020SJason J. Herne 	unsigned int vcpu_idx;
41672f25020SJason J. Herne 	u64 host_tod, gtod;
41772f25020SJason J. Herne 	int r;
41872f25020SJason J. Herne 
41972f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
42072f25020SJason J. Herne 		return -EFAULT;
42172f25020SJason J. Herne 
42272f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
42372f25020SJason J. Herne 	if (r)
42472f25020SJason J. Herne 		return r;
42572f25020SJason J. Herne 
42672f25020SJason J. Herne 	mutex_lock(&kvm->lock);
42772f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
42872f25020SJason J. Herne 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
42972f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
43072f25020SJason J. Herne 		exit_sie(cur_vcpu);
43172f25020SJason J. Herne 	}
43272f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
43372f25020SJason J. Herne 	return 0;
43472f25020SJason J. Herne }
43572f25020SJason J. Herne 
43672f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
43772f25020SJason J. Herne {
43872f25020SJason J. Herne 	int ret;
43972f25020SJason J. Herne 
44072f25020SJason J. Herne 	if (attr->flags)
44172f25020SJason J. Herne 		return -EINVAL;
44272f25020SJason J. Herne 
44372f25020SJason J. Herne 	switch (attr->attr) {
44472f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
44572f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
44672f25020SJason J. Herne 		break;
44772f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
44872f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
44972f25020SJason J. Herne 		break;
45072f25020SJason J. Herne 	default:
45172f25020SJason J. Herne 		ret = -ENXIO;
45272f25020SJason J. Herne 		break;
45372f25020SJason J. Herne 	}
45472f25020SJason J. Herne 	return ret;
45572f25020SJason J. Herne }
45672f25020SJason J. Herne 
45772f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
45872f25020SJason J. Herne {
45972f25020SJason J. Herne 	u8 gtod_high = 0;
46072f25020SJason J. Herne 
46172f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
46272f25020SJason J. Herne 					 sizeof(gtod_high)))
46372f25020SJason J. Herne 		return -EFAULT;
46472f25020SJason J. Herne 
46572f25020SJason J. Herne 	return 0;
46672f25020SJason J. Herne }
46772f25020SJason J. Herne 
46872f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
46972f25020SJason J. Herne {
47072f25020SJason J. Herne 	u64 host_tod, gtod;
47172f25020SJason J. Herne 	int r;
47272f25020SJason J. Herne 
47372f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
47472f25020SJason J. Herne 	if (r)
47572f25020SJason J. Herne 		return r;
47672f25020SJason J. Herne 
47772f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
47872f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
47972f25020SJason J. Herne 		return -EFAULT;
48072f25020SJason J. Herne 
48172f25020SJason J. Herne 	return 0;
48272f25020SJason J. Herne }
48372f25020SJason J. Herne 
48472f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
48572f25020SJason J. Herne {
48672f25020SJason J. Herne 	int ret;
48772f25020SJason J. Herne 
48872f25020SJason J. Herne 	if (attr->flags)
48972f25020SJason J. Herne 		return -EINVAL;
49072f25020SJason J. Herne 
49172f25020SJason J. Herne 	switch (attr->attr) {
49272f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
49372f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
49472f25020SJason J. Herne 		break;
49572f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
49672f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
49772f25020SJason J. Herne 		break;
49872f25020SJason J. Herne 	default:
49972f25020SJason J. Herne 		ret = -ENXIO;
50072f25020SJason J. Herne 		break;
50172f25020SJason J. Herne 	}
50272f25020SJason J. Herne 	return ret;
50372f25020SJason J. Herne }
50472f25020SJason J. Herne 
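/*
 * KVM_S390_VM_CPU_MODEL attributes: the PROCESSOR attribute lets userspace
 * overwrite cpuid, ibc and the guest facility list, but only as long as no
 * vcpu has been created; the MACHINE attribute is read-only and reports the
 * host cpuid, ibc, facility mask and facility list.
 */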
505658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
506658b6edaSMichael Mueller {
507658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
508658b6edaSMichael Mueller 	int ret = 0;
509658b6edaSMichael Mueller 
510658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
511658b6edaSMichael Mueller 	if (atomic_read(&kvm->online_vcpus)) {
512658b6edaSMichael Mueller 		ret = -EBUSY;
513658b6edaSMichael Mueller 		goto out;
514658b6edaSMichael Mueller 	}
515658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
516658b6edaSMichael Mueller 	if (!proc) {
517658b6edaSMichael Mueller 		ret = -ENOMEM;
518658b6edaSMichael Mueller 		goto out;
519658b6edaSMichael Mueller 	}
520658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
521658b6edaSMichael Mueller 			    sizeof(*proc))) {
522658b6edaSMichael Mueller 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
523658b6edaSMichael Mueller 		       sizeof(struct cpuid));
524658b6edaSMichael Mueller 		kvm->arch.model.ibc = proc->ibc;
525981467c9SMichael Mueller 		memcpy(kvm->arch.model.fac->list, proc->fac_list,
526658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
527658b6edaSMichael Mueller 	} else
528658b6edaSMichael Mueller 		ret = -EFAULT;
529658b6edaSMichael Mueller 	kfree(proc);
530658b6edaSMichael Mueller out:
531658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
532658b6edaSMichael Mueller 	return ret;
533658b6edaSMichael Mueller }
534658b6edaSMichael Mueller 
535658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
536658b6edaSMichael Mueller {
537658b6edaSMichael Mueller 	int ret = -ENXIO;
538658b6edaSMichael Mueller 
539658b6edaSMichael Mueller 	switch (attr->attr) {
540658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
541658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
542658b6edaSMichael Mueller 		break;
543658b6edaSMichael Mueller 	}
544658b6edaSMichael Mueller 	return ret;
545658b6edaSMichael Mueller }
546658b6edaSMichael Mueller 
547658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
548658b6edaSMichael Mueller {
549658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
550658b6edaSMichael Mueller 	int ret = 0;
551658b6edaSMichael Mueller 
552658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
553658b6edaSMichael Mueller 	if (!proc) {
554658b6edaSMichael Mueller 		ret = -ENOMEM;
555658b6edaSMichael Mueller 		goto out;
556658b6edaSMichael Mueller 	}
557658b6edaSMichael Mueller 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
558658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
559981467c9SMichael Mueller 	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
560658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
561658b6edaSMichael Mueller 		ret = -EFAULT;
562658b6edaSMichael Mueller 	kfree(proc);
563658b6edaSMichael Mueller out:
564658b6edaSMichael Mueller 	return ret;
565658b6edaSMichael Mueller }
566658b6edaSMichael Mueller 
567658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
568658b6edaSMichael Mueller {
569658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
570658b6edaSMichael Mueller 	int ret = 0;
571658b6edaSMichael Mueller 
572658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
573658b6edaSMichael Mueller 	if (!mach) {
574658b6edaSMichael Mueller 		ret = -ENOMEM;
575658b6edaSMichael Mueller 		goto out;
576658b6edaSMichael Mueller 	}
577658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
578658b6edaSMichael Mueller 	mach->ibc = sclp_get_ibc();
579981467c9SMichael Mueller 	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
580981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
581658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
58294422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
583658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
584658b6edaSMichael Mueller 		ret = -EFAULT;
585658b6edaSMichael Mueller 	kfree(mach);
586658b6edaSMichael Mueller out:
587658b6edaSMichael Mueller 	return ret;
588658b6edaSMichael Mueller }
589658b6edaSMichael Mueller 
590658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
591658b6edaSMichael Mueller {
592658b6edaSMichael Mueller 	int ret = -ENXIO;
593658b6edaSMichael Mueller 
594658b6edaSMichael Mueller 	switch (attr->attr) {
595658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
596658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
597658b6edaSMichael Mueller 		break;
598658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
599658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
600658b6edaSMichael Mueller 		break;
601658b6edaSMichael Mueller 	}
602658b6edaSMichael Mueller 	return ret;
603658b6edaSMichael Mueller }
604658b6edaSMichael Mueller 
605f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
606f2061656SDominik Dingel {
607f2061656SDominik Dingel 	int ret;
608f2061656SDominik Dingel 
609f2061656SDominik Dingel 	switch (attr->group) {
6104f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6118c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
6124f718eabSDominik Dingel 		break;
61372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
61472f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
61572f25020SJason J. Herne 		break;
616658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
617658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
618658b6edaSMichael Mueller 		break;
619a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
620a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
621a374e892STony Krowiak 		break;
622f2061656SDominik Dingel 	default:
623f2061656SDominik Dingel 		ret = -ENXIO;
624f2061656SDominik Dingel 		break;
625f2061656SDominik Dingel 	}
626f2061656SDominik Dingel 
627f2061656SDominik Dingel 	return ret;
628f2061656SDominik Dingel }
629f2061656SDominik Dingel 
630f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
631f2061656SDominik Dingel {
6328c0a7ce6SDominik Dingel 	int ret;
6338c0a7ce6SDominik Dingel 
6348c0a7ce6SDominik Dingel 	switch (attr->group) {
6358c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6368c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
6378c0a7ce6SDominik Dingel 		break;
63872f25020SJason J. Herne 	case KVM_S390_VM_TOD:
63972f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
64072f25020SJason J. Herne 		break;
641658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
642658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
643658b6edaSMichael Mueller 		break;
6448c0a7ce6SDominik Dingel 	default:
6458c0a7ce6SDominik Dingel 		ret = -ENXIO;
6468c0a7ce6SDominik Dingel 		break;
6478c0a7ce6SDominik Dingel 	}
6488c0a7ce6SDominik Dingel 
6498c0a7ce6SDominik Dingel 	return ret;
650f2061656SDominik Dingel }
651f2061656SDominik Dingel 
652f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
653f2061656SDominik Dingel {
654f2061656SDominik Dingel 	int ret;
655f2061656SDominik Dingel 
656f2061656SDominik Dingel 	switch (attr->group) {
6574f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6584f718eabSDominik Dingel 		switch (attr->attr) {
6594f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
6604f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
6618c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
6624f718eabSDominik Dingel 			ret = 0;
6634f718eabSDominik Dingel 			break;
6644f718eabSDominik Dingel 		default:
6654f718eabSDominik Dingel 			ret = -ENXIO;
6664f718eabSDominik Dingel 			break;
6674f718eabSDominik Dingel 		}
6684f718eabSDominik Dingel 		break;
66972f25020SJason J. Herne 	case KVM_S390_VM_TOD:
67072f25020SJason J. Herne 		switch (attr->attr) {
67172f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
67272f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
67372f25020SJason J. Herne 			ret = 0;
67472f25020SJason J. Herne 			break;
67572f25020SJason J. Herne 		default:
67672f25020SJason J. Herne 			ret = -ENXIO;
67772f25020SJason J. Herne 			break;
67872f25020SJason J. Herne 		}
67972f25020SJason J. Herne 		break;
680658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
681658b6edaSMichael Mueller 		switch (attr->attr) {
682658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
683658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
684658b6edaSMichael Mueller 			ret = 0;
685658b6edaSMichael Mueller 			break;
686658b6edaSMichael Mueller 		default:
687658b6edaSMichael Mueller 			ret = -ENXIO;
688658b6edaSMichael Mueller 			break;
689658b6edaSMichael Mueller 		}
690658b6edaSMichael Mueller 		break;
691a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
692a374e892STony Krowiak 		switch (attr->attr) {
693a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
694a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
695a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
696a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
697a374e892STony Krowiak 			ret = 0;
698a374e892STony Krowiak 			break;
699a374e892STony Krowiak 		default:
700a374e892STony Krowiak 			ret = -ENXIO;
701a374e892STony Krowiak 			break;
702a374e892STony Krowiak 		}
703a374e892STony Krowiak 		break;
704f2061656SDominik Dingel 	default:
705f2061656SDominik Dingel 		ret = -ENXIO;
706f2061656SDominik Dingel 		break;
707f2061656SDominik Dingel 	}
708f2061656SDominik Dingel 
709f2061656SDominik Dingel 	return ret;
710f2061656SDominik Dingel }
711f2061656SDominik Dingel 
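/*
 * The attribute groups above are reached through the generic VM device
 * attribute ioctls handled in kvm_arch_vm_ioctl() below. Illustrative
 * userspace usage (a sketch only, error handling omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */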
712b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
713b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
714b0c632dbSHeiko Carstens {
715b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
716b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
717f2061656SDominik Dingel 	struct kvm_device_attr attr;
718b0c632dbSHeiko Carstens 	int r;
719b0c632dbSHeiko Carstens 
720b0c632dbSHeiko Carstens 	switch (ioctl) {
721ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
722ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
723ba5c1e9bSCarsten Otte 
724ba5c1e9bSCarsten Otte 		r = -EFAULT;
725ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
726ba5c1e9bSCarsten Otte 			break;
727ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
728ba5c1e9bSCarsten Otte 		break;
729ba5c1e9bSCarsten Otte 	}
730d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
731d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
732d938dc55SCornelia Huck 		r = -EFAULT;
733d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
734d938dc55SCornelia Huck 			break;
735d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
736d938dc55SCornelia Huck 		break;
737d938dc55SCornelia Huck 	}
73884223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
73984223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
74084223598SCornelia Huck 
74184223598SCornelia Huck 		r = -EINVAL;
74284223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
74384223598SCornelia Huck 			/* Set up dummy routing. */
74484223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
74584223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
74684223598SCornelia Huck 			r = 0;
74784223598SCornelia Huck 		}
74884223598SCornelia Huck 		break;
74984223598SCornelia Huck 	}
750f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
751f2061656SDominik Dingel 		r = -EFAULT;
752f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
753f2061656SDominik Dingel 			break;
754f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
755f2061656SDominik Dingel 		break;
756f2061656SDominik Dingel 	}
757f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
758f2061656SDominik Dingel 		r = -EFAULT;
759f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
760f2061656SDominik Dingel 			break;
761f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
762f2061656SDominik Dingel 		break;
763f2061656SDominik Dingel 	}
764f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
765f2061656SDominik Dingel 		r = -EFAULT;
766f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
767f2061656SDominik Dingel 			break;
768f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
769f2061656SDominik Dingel 		break;
770f2061656SDominik Dingel 	}
771b0c632dbSHeiko Carstens 	default:
772367e1319SAvi Kivity 		r = -ENOTTY;
773b0c632dbSHeiko Carstens 	}
774b0c632dbSHeiko Carstens 
775b0c632dbSHeiko Carstens 	return r;
776b0c632dbSHeiko Carstens }
777b0c632dbSHeiko Carstens 
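/*
 * Query the AP configuration with PQAP(QCI): function code 0x04000000 is
 * loaded into register 0 and the address of a 128-byte result buffer into
 * register 2. The exception table entry tolerates a fault of the
 * instruction: cc then stays 0 and the buffer remains all zeroes.
 * kvm_s390_apxa_installed() uses the result to test for the APXA facility.
 */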
77845c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
77945c9b47cSTony Krowiak {
78045c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
78186044c8cSChristian Borntraeger 	u32 cc = 0;
78245c9b47cSTony Krowiak 
78386044c8cSChristian Borntraeger 	memset(config, 0, 128);
78445c9b47cSTony Krowiak 	asm volatile(
78545c9b47cSTony Krowiak 		"lgr 0,%1\n"
78645c9b47cSTony Krowiak 		"lgr 2,%2\n"
78745c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
78886044c8cSChristian Borntraeger 		"0: ipm %0\n"
78945c9b47cSTony Krowiak 		"srl %0,28\n"
79086044c8cSChristian Borntraeger 		"1:\n"
79186044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
79286044c8cSChristian Borntraeger 		: "+r" (cc)
79345c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
79445c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
79545c9b47cSTony Krowiak 	);
79645c9b47cSTony Krowiak 
79745c9b47cSTony Krowiak 	return cc;
79845c9b47cSTony Krowiak }
79945c9b47cSTony Krowiak 
80045c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
80145c9b47cSTony Krowiak {
80245c9b47cSTony Krowiak 	u8 config[128];
80345c9b47cSTony Krowiak 	int cc;
80445c9b47cSTony Krowiak 
80545c9b47cSTony Krowiak 	if (test_facility(2) && test_facility(12)) {
80645c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
80745c9b47cSTony Krowiak 
80845c9b47cSTony Krowiak 		if (cc)
80945c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
81045c9b47cSTony Krowiak 		else
81145c9b47cSTony Krowiak 			return config[0] & 0x40;
81245c9b47cSTony Krowiak 	}
81345c9b47cSTony Krowiak 
81445c9b47cSTony Krowiak 	return 0;
81545c9b47cSTony Krowiak }
81645c9b47cSTony Krowiak 
81745c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
81845c9b47cSTony Krowiak {
81945c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
82045c9b47cSTony Krowiak 
82145c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
82245c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
82345c9b47cSTony Krowiak 	else
82445c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
82545c9b47cSTony Krowiak }
82645c9b47cSTony Krowiak 
8279d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
8289d8d5786SMichael Mueller {
8299d8d5786SMichael Mueller 	get_cpu_id(cpu_id);
8309d8d5786SMichael Mueller 	cpu_id->version = 0xff;
8319d8d5786SMichael Mueller }
8329d8d5786SMichael Mueller 
8335102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
8345102ee87STony Krowiak {
8359d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
8365102ee87STony Krowiak 		return 0;
8375102ee87STony Krowiak 
8385102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
8395102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
8405102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
8415102ee87STony Krowiak 		return -ENOMEM;
8425102ee87STony Krowiak 
84345c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
8445102ee87STony Krowiak 
845ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
846ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
847ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
848ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
849ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
850ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
851ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
852a374e892STony Krowiak 
8535102ee87STony Krowiak 	return 0;
8545102ee87STony Krowiak }
8555102ee87STony Krowiak 
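/*
 * VM creation: allocate the system control area (placed at a per-VM
 * rotating offset inside its page), the s390 debug feature, one page for
 * the facility mask and list, the crypto control block and, unless this is
 * a ucontrol VM, a gmap covering guest addresses up to 2^44 - 1.
 */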
856e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
857b0c632dbSHeiko Carstens {
8589d8d5786SMichael Mueller 	int i, rc;
859b0c632dbSHeiko Carstens 	char debug_name[16];
860f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
861b0c632dbSHeiko Carstens 
862e08b9637SCarsten Otte 	rc = -EINVAL;
863e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
864e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
865e08b9637SCarsten Otte 		goto out_err;
866e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
867e08b9637SCarsten Otte 		goto out_err;
868e08b9637SCarsten Otte #else
869e08b9637SCarsten Otte 	if (type)
870e08b9637SCarsten Otte 		goto out_err;
871e08b9637SCarsten Otte #endif
872e08b9637SCarsten Otte 
873b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
874b0c632dbSHeiko Carstens 	if (rc)
875d89f5effSJan Kiszka 		goto out_err;
876b0c632dbSHeiko Carstens 
877b290411aSCarsten Otte 	rc = -ENOMEM;
878b290411aSCarsten Otte 
879b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
880b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
881d89f5effSJan Kiszka 		goto out_err;
882f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
883f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
884f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
885f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
886b0c632dbSHeiko Carstens 
887b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
888b0c632dbSHeiko Carstens 
889b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
890b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
891b0c632dbSHeiko Carstens 		goto out_nodbf;
892b0c632dbSHeiko Carstens 
8939d8d5786SMichael Mueller 	/*
8949d8d5786SMichael Mueller 	 * The architectural maximum number of facility bits is 16 kbit, which
8959d8d5786SMichael Mueller 	 * takes 2 kbyte of memory to store. Thus a full page is needed to hold
896981467c9SMichael Mueller 	 * both the guest facility list (arch.model.fac->list) and the facility
897981467c9SMichael Mueller 	 * mask (arch.model.fac->mask). The page address has to fit into
8989d8d5786SMichael Mueller 	 * 31 bits and be word aligned.
8999d8d5786SMichael Mueller 	 */
9009d8d5786SMichael Mueller 	kvm->arch.model.fac =
901981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9029d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
9039d8d5786SMichael Mueller 		goto out_nofac;
9049d8d5786SMichael Mueller 
905fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
906981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
90794422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
9089d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
9099d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
910981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9119d8d5786SMichael Mueller 		else
912981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
9139d8d5786SMichael Mueller 	}
9149d8d5786SMichael Mueller 
915981467c9SMichael Mueller 	/* Populate the facility list initially. */
916981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
917981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
918981467c9SMichael Mueller 
9199d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
920658b6edaSMichael Mueller 	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
9219d8d5786SMichael Mueller 
9225102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
9235102ee87STony Krowiak 		goto out_crypto;
9245102ee87STony Krowiak 
925ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
926ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
9278a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
928a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
929ba5c1e9bSCarsten Otte 
930b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
931b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
932b0c632dbSHeiko Carstens 
933e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
934e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
935e08b9637SCarsten Otte 	} else {
9360349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
937598841caSCarsten Otte 		if (!kvm->arch.gmap)
938598841caSCarsten Otte 			goto out_nogmap;
9392c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
94024eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
941e08b9637SCarsten Otte 	}
942fa6b7fe9SCornelia Huck 
943fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
94484223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
94572f25020SJason J. Herne 	kvm->arch.epoch = 0;
946fa6b7fe9SCornelia Huck 
9478ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
9488ad35755SDavid Hildenbrand 
949d89f5effSJan Kiszka 	return 0;
950598841caSCarsten Otte out_nogmap:
9515102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
9525102ee87STony Krowiak out_crypto:
9539d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
9549d8d5786SMichael Mueller out_nofac:
955598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
956b0c632dbSHeiko Carstens out_nodbf:
957b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
958d89f5effSJan Kiszka out_err:
959d89f5effSJan Kiszka 	return rc;
960b0c632dbSHeiko Carstens }
961b0c632dbSHeiko Carstens 
962d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
963d329c035SChristian Borntraeger {
964d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
965ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
96667335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
9673c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
96858f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
96958f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
97058f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
971abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
972abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
973abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
97458f9460bSCarsten Otte 	}
975abf4a71eSCarsten Otte 	smp_mb();
97627e0393fSCarsten Otte 
97727e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
97827e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
97927e0393fSCarsten Otte 
980b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
981b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
982d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
983b31288faSKonstantin Weitz 
9846692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
985b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
986d329c035SChristian Borntraeger }
987d329c035SChristian Borntraeger 
988d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
989d329c035SChristian Borntraeger {
990d329c035SChristian Borntraeger 	unsigned int i;
991988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
992d329c035SChristian Borntraeger 
993988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
994988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
995988a2caeSGleb Natapov 
996988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
997988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
998d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
999988a2caeSGleb Natapov 
1000988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1001988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1002d329c035SChristian Borntraeger }
1003d329c035SChristian Borntraeger 
1004b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1005b0c632dbSHeiko Carstens {
1006d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
10079d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1008b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1009d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
10105102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
101127e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1012598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1013841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
101467335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1015b0c632dbSHeiko Carstens }
1016b0c632dbSHeiko Carstens 
1017b0c632dbSHeiko Carstens /* Section: vcpu related */
1018dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1019b0c632dbSHeiko Carstens {
1020c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
102127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
102227e0393fSCarsten Otte 		return -ENOMEM;
10232c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1024dafd032aSDominik Dingel 
102527e0393fSCarsten Otte 	return 0;
102627e0393fSCarsten Otte }
102727e0393fSCarsten Otte 
1028dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1029dafd032aSDominik Dingel {
1030dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1031dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
103259674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
103359674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
10349eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1035b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1036b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1037b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
1038dafd032aSDominik Dingel 
1039dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1040dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1041dafd032aSDominik Dingel 
1042b0c632dbSHeiko Carstens 	return 0;
1043b0c632dbSHeiko Carstens }
1044b0c632dbSHeiko Carstens 
1045b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1046b0c632dbSHeiko Carstens {
10474725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
10484725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
1049b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
10504725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10514725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
105259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1053480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
10549e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1055b0c632dbSHeiko Carstens }
1056b0c632dbSHeiko Carstens 
1057b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1058b0c632dbSHeiko Carstens {
10599e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1060480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
10614725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10624725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
106359674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
10644725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
10654725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1066b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1067b0c632dbSHeiko Carstens }
1068b0c632dbSHeiko Carstens 
1069b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1070b0c632dbSHeiko Carstens {
1071b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
1072b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1073b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
10748d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1075b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1076b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1077b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1078b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1079b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1080b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1081b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1082b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1083b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1084672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
10853c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
10863c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
10876352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
10886852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
10892ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1090b0c632dbSHeiko Carstens }
1091b0c632dbSHeiko Carstens 
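/*
 * Descriptive note (added): called after vcpu creation; inherit the
 * VM-wide TOD epoch under kvm->lock and, for non-ucontrol guests,
 * share the VM's guest address space (gmap) with this vcpu.
 */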
109231928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
109342897d86SMarcelo Tosatti {
109472f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
109572f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
109672f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1097dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1098dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
109942897d86SMarcelo Tosatti }
110042897d86SMarcelo Tosatti 
11015102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
11025102ee87STony Krowiak {
11039d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
11045102ee87STony Krowiak 		return;
11055102ee87STony Krowiak 
1106a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1107a374e892STony Krowiak 
1108a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1109a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1110a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1111a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1112a374e892STony Krowiak 
11135102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
11145102ee87STony Krowiak }
11155102ee87STony Krowiak 
1116b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1117b31605c1SDominik Dingel {
1118b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1119b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1120b31605c1SDominik Dingel }
1121b31605c1SDominik Dingel 
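/*
 * Descriptive note (added): allocate the zeroed page backing the
 * collaborative-memory-management (CMMA) collection buffer and
 * announce it in the SIE control block for this vcpu.
 */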
1122b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1123b31605c1SDominik Dingel {
1124b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1125b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1126b31605c1SDominik Dingel 		return -ENOMEM;
1127b31605c1SDominik Dingel 
1128b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1129b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1130b31605c1SDominik Dingel 	return 0;
1131b31605c1SDominik Dingel }
1132b31605c1SDominik Dingel 
1133*91520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1134*91520f1aSMichael Mueller {
1135*91520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1136*91520f1aSMichael Mueller 
1137*91520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
1138*91520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
1139*91520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1140*91520f1aSMichael Mueller }
1141*91520f1aSMichael Mueller 
1142b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1143b0c632dbSHeiko Carstens {
1144b31605c1SDominik Dingel 	int rc = 0;
1145b31288faSKonstantin Weitz 
11469e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
11479e6dabefSCornelia Huck 						    CPUSTAT_SM |
114869d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
114969d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
1150*91520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
1151*91520f1aSMichael Mueller 
1152fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
11539d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
11547feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
11557feb6bb8SMichael Mueller 
115669d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1157ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
1158217a4406SHeiko Carstens 	if (sclp_has_siif())
1159217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
1160ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
1161ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
1162492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
11635a5e6536SMatthew Rosato 
1164b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1165b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1166b31605c1SDominik Dingel 		if (rc)
1167b31605c1SDominik Dingel 			return rc;
1168b31288faSKonstantin Weitz 	}
11690ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1170ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
11719d8d5786SMichael Mueller 
11725102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
11735102ee87STony Krowiak 
1174b31605c1SDominik Dingel 	return rc;
1175b0c632dbSHeiko Carstens }
1176b0c632dbSHeiko Carstens 
1177b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1178b0c632dbSHeiko Carstens 				      unsigned int id)
1179b0c632dbSHeiko Carstens {
11804d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
11817feb6bb8SMichael Mueller 	struct sie_page *sie_page;
11824d47555aSCarsten Otte 	int rc = -EINVAL;
1183b0c632dbSHeiko Carstens 
11844d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
11854d47555aSCarsten Otte 		goto out;
11864d47555aSCarsten Otte 
11874d47555aSCarsten Otte 	rc = -ENOMEM;
11884d47555aSCarsten Otte 
1189b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1190b0c632dbSHeiko Carstens 	if (!vcpu)
11914d47555aSCarsten Otte 		goto out;
1192b0c632dbSHeiko Carstens 
11937feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
11947feb6bb8SMichael Mueller 	if (!sie_page)
1195b0c632dbSHeiko Carstens 		goto out_free_cpu;
1196b0c632dbSHeiko Carstens 
11977feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
11987feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
11997feb6bb8SMichael Mueller 
1200b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
120158f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
120258f9460bSCarsten Otte 		if (!kvm->arch.sca) {
120358f9460bSCarsten Otte 			WARN_ON_ONCE(1);
120458f9460bSCarsten Otte 			goto out_free_cpu;
120558f9460bSCarsten Otte 		}
1206abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
120758f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
120858f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
120958f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
121058f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1211b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1212fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
121358f9460bSCarsten Otte 	}
1214b0c632dbSHeiko Carstens 
1215ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1216ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1217d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
12185288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1219ba5c1e9bSCarsten Otte 
1220b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1221b0c632dbSHeiko Carstens 	if (rc)
12227b06bf2fSWei Yongjun 		goto out_free_sie_block;
1223b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1224b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1225ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1226b0c632dbSHeiko Carstens 
1227b0c632dbSHeiko Carstens 	return vcpu;
12287b06bf2fSWei Yongjun out_free_sie_block:
12297b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1230b0c632dbSHeiko Carstens out_free_cpu:
1231b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
12324d47555aSCarsten Otte out:
1233b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1234b0c632dbSHeiko Carstens }
1235b0c632dbSHeiko Carstens 
1236b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1237b0c632dbSHeiko Carstens {
12389a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1239b0c632dbSHeiko Carstens }
1240b0c632dbSHeiko Carstens 
124149b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
124249b99e1eSChristian Borntraeger {
124349b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
124449b99e1eSChristian Borntraeger }
124549b99e1eSChristian Borntraeger 
124649b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
124749b99e1eSChristian Borntraeger {
124849b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
124949b99e1eSChristian Borntraeger }
125049b99e1eSChristian Borntraeger 
125149b99e1eSChristian Borntraeger /*
125249b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
125349b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle), the function
125449b99e1eSChristian Borntraeger  * returns immediately. */
125549b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
125649b99e1eSChristian Borntraeger {
125749b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
125849b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
125949b99e1eSChristian Borntraeger 		cpu_relax();
126049b99e1eSChristian Borntraeger }
126149b99e1eSChristian Borntraeger 
126249b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
126349b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
126449b99e1eSChristian Borntraeger {
126549b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
126649b99e1eSChristian Borntraeger 	exit_sie(vcpu);
126749b99e1eSChristian Borntraeger }
126849b99e1eSChristian Borntraeger 
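/*
 * Descriptive note (added): gmap notifier callback. If the invalidated
 * address hits one of a vcpu's two prefix pages, kick that vcpu out of
 * SIE and request MMU_RELOAD so the prefix mapping is re-armed before
 * the vcpu re-enters the guest.
 */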
12692c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
12702c70fe44SChristian Borntraeger {
12712c70fe44SChristian Borntraeger 	int i;
12722c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
12732c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
12742c70fe44SChristian Borntraeger 
12752c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
12762c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1277fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
12782c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
12792c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
12802c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
12812c70fe44SChristian Borntraeger 		}
12822c70fe44SChristian Borntraeger 	}
12832c70fe44SChristian Borntraeger }
12842c70fe44SChristian Borntraeger 
1285b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1286b6d33834SChristoffer Dall {
1287b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1288b6d33834SChristoffer Dall 	BUG();
1289b6d33834SChristoffer Dall 	return 0;
1290b6d33834SChristoffer Dall }
1291b6d33834SChristoffer Dall 
129214eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
129314eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
129414eebd91SCarsten Otte {
129514eebd91SCarsten Otte 	int r = -EINVAL;
129614eebd91SCarsten Otte 
129714eebd91SCarsten Otte 	switch (reg->id) {
129829b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
129929b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
130029b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
130129b7c71bSCarsten Otte 		break;
130229b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
130329b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
130429b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
130529b7c71bSCarsten Otte 		break;
130646a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
130746a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
130846a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
130946a6dd1cSJason J. herne 		break;
131046a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
131146a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
131246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
131346a6dd1cSJason J. herne 		break;
1314536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1315536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1316536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1317536336c2SDominik Dingel 		break;
1318536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1319536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1320536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1321536336c2SDominik Dingel 		break;
1322536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1323536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1324536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1325536336c2SDominik Dingel 		break;
1326672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1327672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1328672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1329672550fbSChristian Borntraeger 		break;
1330afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1331afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1332afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1333afa45ff5SChristian Borntraeger 		break;
133414eebd91SCarsten Otte 	default:
133514eebd91SCarsten Otte 		break;
133614eebd91SCarsten Otte 	}
133714eebd91SCarsten Otte 
133814eebd91SCarsten Otte 	return r;
133914eebd91SCarsten Otte }
134014eebd91SCarsten Otte 
134114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
134214eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
134314eebd91SCarsten Otte {
134414eebd91SCarsten Otte 	int r = -EINVAL;
134514eebd91SCarsten Otte 
134614eebd91SCarsten Otte 	switch (reg->id) {
134729b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
134829b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
134929b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
135029b7c71bSCarsten Otte 		break;
135129b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
135229b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
135329b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
135429b7c71bSCarsten Otte 		break;
135546a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
135646a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
135746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
135846a6dd1cSJason J. herne 		break;
135946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
136046a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
136146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
136246a6dd1cSJason J. herne 		break;
1363536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1364536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1365536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
13669fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
13679fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1368536336c2SDominik Dingel 		break;
1369536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1370536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1371536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1372536336c2SDominik Dingel 		break;
1373536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1374536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1375536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1376536336c2SDominik Dingel 		break;
1377672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1378672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1379672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1380672550fbSChristian Borntraeger 		break;
1381afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1382afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1383afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1384afa45ff5SChristian Borntraeger 		break;
138514eebd91SCarsten Otte 	default:
138614eebd91SCarsten Otte 		break;
138714eebd91SCarsten Otte 	}
138814eebd91SCarsten Otte 
138914eebd91SCarsten Otte 	return r;
139014eebd91SCarsten Otte }
1391b6d33834SChristoffer Dall 
1392b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1393b0c632dbSHeiko Carstens {
1394b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1395b0c632dbSHeiko Carstens 	return 0;
1396b0c632dbSHeiko Carstens }
1397b0c632dbSHeiko Carstens 
1398b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1399b0c632dbSHeiko Carstens {
14005a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1401b0c632dbSHeiko Carstens 	return 0;
1402b0c632dbSHeiko Carstens }
1403b0c632dbSHeiko Carstens 
1404b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1405b0c632dbSHeiko Carstens {
14065a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1407b0c632dbSHeiko Carstens 	return 0;
1408b0c632dbSHeiko Carstens }
1409b0c632dbSHeiko Carstens 
1410b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1411b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1412b0c632dbSHeiko Carstens {
141359674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1414b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
141559674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1416b0c632dbSHeiko Carstens 	return 0;
1417b0c632dbSHeiko Carstens }
1418b0c632dbSHeiko Carstens 
1419b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1420b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1421b0c632dbSHeiko Carstens {
142259674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1423b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1424b0c632dbSHeiko Carstens 	return 0;
1425b0c632dbSHeiko Carstens }
1426b0c632dbSHeiko Carstens 
1427b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1428b0c632dbSHeiko Carstens {
14294725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
14304725c860SMartin Schwidefsky 		return -EINVAL;
1431b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
14324725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
14334725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
14344725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1435b0c632dbSHeiko Carstens 	return 0;
1436b0c632dbSHeiko Carstens }
1437b0c632dbSHeiko Carstens 
1438b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1439b0c632dbSHeiko Carstens {
1440b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1441b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1442b0c632dbSHeiko Carstens 	return 0;
1443b0c632dbSHeiko Carstens }
1444b0c632dbSHeiko Carstens 
1445b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1446b0c632dbSHeiko Carstens {
1447b0c632dbSHeiko Carstens 	int rc = 0;
1448b0c632dbSHeiko Carstens 
14497a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1450b0c632dbSHeiko Carstens 		rc = -EBUSY;
1451d7b0b5ebSCarsten Otte 	else {
1452d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1453d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1454d7b0b5ebSCarsten Otte 	}
1455b0c632dbSHeiko Carstens 	return rc;
1456b0c632dbSHeiko Carstens }
1457b0c632dbSHeiko Carstens 
1458b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1459b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1460b0c632dbSHeiko Carstens {
1461b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1462b0c632dbSHeiko Carstens }
1463b0c632dbSHeiko Carstens 
146427291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
146527291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
146627291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
146727291e21SDavid Hildenbrand 
1468d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1469d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1470b0c632dbSHeiko Carstens {
147127291e21SDavid Hildenbrand 	int rc = 0;
147227291e21SDavid Hildenbrand 
147327291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
147427291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
147527291e21SDavid Hildenbrand 
14762de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
147727291e21SDavid Hildenbrand 		return -EINVAL;
147827291e21SDavid Hildenbrand 
147927291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
148027291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
148127291e21SDavid Hildenbrand 		/* enforce guest PER */
148227291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
148327291e21SDavid Hildenbrand 
148427291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
148527291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
148627291e21SDavid Hildenbrand 	} else {
148727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
148827291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
148927291e21SDavid Hildenbrand 	}
149027291e21SDavid Hildenbrand 
149127291e21SDavid Hildenbrand 	if (rc) {
149227291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
149327291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
149427291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
149527291e21SDavid Hildenbrand 	}
149627291e21SDavid Hildenbrand 
149727291e21SDavid Hildenbrand 	return rc;
1498b0c632dbSHeiko Carstens }
1499b0c632dbSHeiko Carstens 
150062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
150162d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
150262d9f0dbSMarcelo Tosatti {
15036352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
15046352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
15056352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
150662d9f0dbSMarcelo Tosatti }
150762d9f0dbSMarcelo Tosatti 
150862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
150962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
151062d9f0dbSMarcelo Tosatti {
15116352e4d2SDavid Hildenbrand 	int rc = 0;
15126352e4d2SDavid Hildenbrand 
15136352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
15146352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
15156352e4d2SDavid Hildenbrand 
15166352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
15176352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
15186352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
15196352e4d2SDavid Hildenbrand 		break;
15206352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
15216352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
15226352e4d2SDavid Hildenbrand 		break;
15236352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
15246352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
15256352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
15266352e4d2SDavid Hildenbrand 	default:
15276352e4d2SDavid Hildenbrand 		rc = -ENXIO;
15286352e4d2SDavid Hildenbrand 	}
15296352e4d2SDavid Hildenbrand 
15306352e4d2SDavid Hildenbrand 	return rc;
153162d9f0dbSMarcelo Tosatti }
153262d9f0dbSMarcelo Tosatti 
1533b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1534b31605c1SDominik Dingel {
1535b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1536b31605c1SDominik Dingel 		return false;
1537b31605c1SDominik Dingel 	/* only enable for z10 and later */
1538b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1539b31605c1SDominik Dingel 		return false;
1540b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1541b31605c1SDominik Dingel 		return false;
1542b31605c1SDominik Dingel 	return true;
1543b31605c1SDominik Dingel }
1544b31605c1SDominik Dingel 
15458ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
15468ad35755SDavid Hildenbrand {
15478ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
15488ad35755SDavid Hildenbrand }
15498ad35755SDavid Hildenbrand 
15502c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
15512c70fe44SChristian Borntraeger {
15528ad35755SDavid Hildenbrand retry:
15538ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
15542c70fe44SChristian Borntraeger 	/*
15552c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
15562c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
15572c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
15582c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
15592c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
15602c70fe44SChristian Borntraeger 	 */
15618ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
15622c70fe44SChristian Borntraeger 		int rc;
15632c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1564fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
15652c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
15662c70fe44SChristian Borntraeger 		if (rc)
15672c70fe44SChristian Borntraeger 			return rc;
15688ad35755SDavid Hildenbrand 		goto retry;
15692c70fe44SChristian Borntraeger 	}
15708ad35755SDavid Hildenbrand 
1571d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1572d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1573d3d692c8SDavid Hildenbrand 		goto retry;
1574d3d692c8SDavid Hildenbrand 	}
1575d3d692c8SDavid Hildenbrand 
15768ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
15778ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
15788ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
15798ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
15808ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
15818ad35755SDavid Hildenbrand 		}
15828ad35755SDavid Hildenbrand 		goto retry;
15838ad35755SDavid Hildenbrand 	}
15848ad35755SDavid Hildenbrand 
15858ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
15868ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
15878ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
15888ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
15898ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
15908ad35755SDavid Hildenbrand 		}
15918ad35755SDavid Hildenbrand 		goto retry;
15928ad35755SDavid Hildenbrand 	}
15938ad35755SDavid Hildenbrand 
15940759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
15950759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
15960759d068SDavid Hildenbrand 
15972c70fe44SChristian Borntraeger 	return 0;
15982c70fe44SChristian Borntraeger }
15992c70fe44SChristian Borntraeger 
1600fa576c58SThomas Huth /**
1601fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1602fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1603fa576c58SThomas Huth  * @gpa: Guest physical address
1604fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1605fa576c58SThomas Huth  *
1606fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1607fa576c58SThomas Huth  *
1608fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1609fa576c58SThomas Huth  */
1610fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
161124eb3a82SDominik Dingel {
1612527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1613527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
161424eb3a82SDominik Dingel }
161524eb3a82SDominik Dingel 
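/*
 * Descriptive note (added): inject the pseudo-page-fault notification
 * for async page faults - PFAULT_INIT towards the vcpu when async
 * handling of a fault starts, PFAULT_DONE as a floating interrupt once
 * the page has been made available.
 */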
16163c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
16173c038e6bSDominik Dingel 				      unsigned long token)
16183c038e6bSDominik Dingel {
16193c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1620383d0b05SJens Freimann 	struct kvm_s390_irq irq;
16213c038e6bSDominik Dingel 
16223c038e6bSDominik Dingel 	if (start_token) {
1623383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1624383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1625383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
16263c038e6bSDominik Dingel 	} else {
16273c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1628383d0b05SJens Freimann 		inti.parm64 = token;
16293c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
16303c038e6bSDominik Dingel 	}
16313c038e6bSDominik Dingel }
16323c038e6bSDominik Dingel 
16333c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
16343c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
16353c038e6bSDominik Dingel {
16363c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
16373c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
16383c038e6bSDominik Dingel }
16393c038e6bSDominik Dingel 
16403c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
16413c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
16423c038e6bSDominik Dingel {
16433c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
16443c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
16453c038e6bSDominik Dingel }
16463c038e6bSDominik Dingel 
16473c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
16483c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
16493c038e6bSDominik Dingel {
16503c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
16513c038e6bSDominik Dingel }
16523c038e6bSDominik Dingel 
16533c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
16543c038e6bSDominik Dingel {
16553c038e6bSDominik Dingel 	/*
16563c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
16573c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
16583c038e6bSDominik Dingel 	 */
16593c038e6bSDominik Dingel 	return true;
16603c038e6bSDominik Dingel }
16613c038e6bSDominik Dingel 
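/*
 * Descriptive note (added): check the architectural preconditions for
 * pfault handling and, if they are met, queue an async page fault for
 * the current gmap fault address; the caller falls back to synchronous
 * fault-in when this returns 0.
 */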
16623c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
16633c038e6bSDominik Dingel {
16643c038e6bSDominik Dingel 	hva_t hva;
16653c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
16663c038e6bSDominik Dingel 	int rc;
16673c038e6bSDominik Dingel 
16683c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
16693c038e6bSDominik Dingel 		return 0;
16703c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
16713c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
16723c038e6bSDominik Dingel 		return 0;
16733c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
16743c038e6bSDominik Dingel 		return 0;
16759a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
16763c038e6bSDominik Dingel 		return 0;
16773c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
16783c038e6bSDominik Dingel 		return 0;
16793c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
16803c038e6bSDominik Dingel 		return 0;
16813c038e6bSDominik Dingel 
168281480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
168381480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
168481480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
16853c038e6bSDominik Dingel 		return 0;
16863c038e6bSDominik Dingel 
16873c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
16883c038e6bSDominik Dingel 	return rc;
16893c038e6bSDominik Dingel }
16903c038e6bSDominik Dingel 
16913fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1692b0c632dbSHeiko Carstens {
16933fb4c40fSThomas Huth 	int rc, cpuflags;
1694e168bf8dSCarsten Otte 
16953c038e6bSDominik Dingel 	/*
16963c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
16973c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
16983c038e6bSDominik Dingel 	 * handled outside the worker.
16993c038e6bSDominik Dingel 	 */
17003c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
17013c038e6bSDominik Dingel 
17025a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1703b0c632dbSHeiko Carstens 
1704b0c632dbSHeiko Carstens 	if (need_resched())
1705b0c632dbSHeiko Carstens 		schedule();
1706b0c632dbSHeiko Carstens 
1707d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
170871cde587SChristian Borntraeger 		s390_handle_mcck();
170971cde587SChristian Borntraeger 
171079395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
171179395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
171279395031SJens Freimann 		if (rc)
171379395031SJens Freimann 			return rc;
171479395031SJens Freimann 	}
17150ff31867SCarsten Otte 
17162c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
17172c70fe44SChristian Borntraeger 	if (rc)
17182c70fe44SChristian Borntraeger 		return rc;
17192c70fe44SChristian Borntraeger 
172027291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
172127291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
172227291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
172327291e21SDavid Hildenbrand 	}
172427291e21SDavid Hildenbrand 
1725b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
17263fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
17273fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
17283fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
17292b29a9fdSDominik Dingel 
17303fb4c40fSThomas Huth 	return 0;
17313fb4c40fSThomas Huth }
17323fb4c40fSThomas Huth 
1733492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1734492d8642SThomas Huth {
1735492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1736492d8642SThomas Huth 	u8 opcode;
1737492d8642SThomas Huth 	int rc;
1738492d8642SThomas Huth 
1739492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1740492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
1741492d8642SThomas Huth 
1742492d8642SThomas Huth 	/*
1743492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
1744492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
1745492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
1746492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
1747492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
1748492d8642SThomas Huth 	 * to be able to forward the PSW.
1749492d8642SThomas Huth 	 */
1750492d8642SThomas Huth 	rc = read_guest(vcpu, psw->addr, &opcode, 1);
1751492d8642SThomas Huth 	if (rc)
1752492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
1753492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1754492d8642SThomas Huth 
1755492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1756492d8642SThomas Huth }
1757492d8642SThomas Huth 
17583fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
17593fb4c40fSThomas Huth {
176024eb3a82SDominik Dingel 	int rc = -1;
17612b29a9fdSDominik Dingel 
17622b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
17632b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
17642b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
17652b29a9fdSDominik Dingel 
176627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
176727291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
176827291e21SDavid Hildenbrand 
17693fb4c40fSThomas Huth 	if (exit_reason >= 0) {
17707c470539SMartin Schwidefsky 		rc = 0;
1771210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1772210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1773210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1774210b1607SThomas Huth 						current->thread.gmap_addr;
1775210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1776210b1607SThomas Huth 		rc = -EREMOTE;
177724eb3a82SDominik Dingel 
177824eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
17793c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
178024eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1781fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
178224eb3a82SDominik Dingel 			rc = 0;
1783fa576c58SThomas Huth 		} else {
1784fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1785fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1786fa576c58SThomas Huth 		}
178724eb3a82SDominik Dingel 	}
178824eb3a82SDominik Dingel 
1789492d8642SThomas Huth 	if (rc == -1)
1790492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
1791b0c632dbSHeiko Carstens 
17925a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
17933fb4c40fSThomas Huth 
1794a76ccff6SThomas Huth 	if (rc == 0) {
1795a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
17962955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
17972955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1798a76ccff6SThomas Huth 		else
1799a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1800a76ccff6SThomas Huth 	}
1801a76ccff6SThomas Huth 
18023fb4c40fSThomas Huth 	return rc;
18033fb4c40fSThomas Huth }
18043fb4c40fSThomas Huth 
18053fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
18063fb4c40fSThomas Huth {
18073fb4c40fSThomas Huth 	int rc, exit_reason;
18083fb4c40fSThomas Huth 
1809800c1065SThomas Huth 	/*
1810800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1811800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1812800c1065SThomas Huth 	 */
1813800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1814800c1065SThomas Huth 
1815a76ccff6SThomas Huth 	do {
18163fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
18173fb4c40fSThomas Huth 		if (rc)
1818a76ccff6SThomas Huth 			break;
18193fb4c40fSThomas Huth 
1820800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
18213fb4c40fSThomas Huth 		/*
1822a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1823a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
18243fb4c40fSThomas Huth 		 */
18253fb4c40fSThomas Huth 		preempt_disable();
18263fb4c40fSThomas Huth 		kvm_guest_enter();
18273fb4c40fSThomas Huth 		preempt_enable();
1828a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1829a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
18303fb4c40fSThomas Huth 		kvm_guest_exit();
1831800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
18323fb4c40fSThomas Huth 
18333fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
183427291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
18353fb4c40fSThomas Huth 
1836800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1837e168bf8dSCarsten Otte 	return rc;
1838b0c632dbSHeiko Carstens }
1839b0c632dbSHeiko Carstens 
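/*
 * Descriptive note (added): transfer the register state that userspace
 * flagged as dirty in kvm_run into the vcpu / SIE control block before
 * entering the guest.
 */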
1840b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1841b028ee3eSDavid Hildenbrand {
1842b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1843b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1844b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1845b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1846b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1847b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1848d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1849d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1850b028ee3eSDavid Hildenbrand 	}
1851b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1852b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1853b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1854b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1855b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1856b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1857b028ee3eSDavid Hildenbrand 	}
1858b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1859b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1860b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1861b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
18629fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
18639fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1864b028ee3eSDavid Hildenbrand 	}
1865b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1866b028ee3eSDavid Hildenbrand }
1867b028ee3eSDavid Hildenbrand 
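/* Descriptive note (added): mirror the current vcpu / SIE state back into kvm_run for userspace. */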
1868b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1869b028ee3eSDavid Hildenbrand {
1870b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1871b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1872b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1873b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1874b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1875b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1876b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1877b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1878b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1879b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1880b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1881b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1882b028ee3eSDavid Hildenbrand }
1883b028ee3eSDavid Hildenbrand 
1884b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1885b0c632dbSHeiko Carstens {
18868f2abe6aSChristian Borntraeger 	int rc;
1887b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1888b0c632dbSHeiko Carstens 
188927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
189027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
189127291e21SDavid Hildenbrand 		return 0;
189227291e21SDavid Hildenbrand 	}
189327291e21SDavid Hildenbrand 
1894b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1895b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1896b0c632dbSHeiko Carstens 
18976352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
18986852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
18996352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
19006352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
19016352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
19026352e4d2SDavid Hildenbrand 		return -EINVAL;
19036352e4d2SDavid Hildenbrand 	}
1904b0c632dbSHeiko Carstens 
1905b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1906d7b0b5ebSCarsten Otte 
1907dab4079dSHeiko Carstens 	might_fault();
1908e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
19099ace903dSChristian Ehrhardt 
1910b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1911b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
19128f2abe6aSChristian Borntraeger 		rc = -EINTR;
1913b1d16c49SChristian Ehrhardt 	}
19148f2abe6aSChristian Borntraeger 
191527291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
191627291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
191727291e21SDavid Hildenbrand 		rc = 0;
191827291e21SDavid Hildenbrand 	}
191927291e21SDavid Hildenbrand 
1920b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
19218f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
19228f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
19238f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
19248f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
19258f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
19268f2abe6aSChristian Borntraeger 		rc = 0;
19278f2abe6aSChristian Borntraeger 	}
19288f2abe6aSChristian Borntraeger 
19298f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
19308f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed
19318f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
19328f2abe6aSChristian Borntraeger 		rc = 0;
19338f2abe6aSChristian Borntraeger 	}
19348f2abe6aSChristian Borntraeger 
1935b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1936d7b0b5ebSCarsten Otte 
1937b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1938b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1939b0c632dbSHeiko Carstens 
1940b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
19417e8e6ab4SHeiko Carstens 	return rc;
1942b0c632dbSHeiko Carstens }
1943b0c632dbSHeiko Carstens 
1944b0c632dbSHeiko Carstens /*
1945b0c632dbSHeiko Carstens  * store status at address
1946b0c632dbSHeiko Carstens  * we have two special cases:
1947b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1948b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1949b0c632dbSHeiko Carstens  */
1950d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1951b0c632dbSHeiko Carstens {
1952092670cdSCarsten Otte 	unsigned char archmode = 1;
1953fda902cbSMichael Mueller 	unsigned int px;
1954178bd789SThomas Huth 	u64 clkcomp;
1955d0bce605SHeiko Carstens 	int rc;
1956b0c632dbSHeiko Carstens 
1957d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1958d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1959b0c632dbSHeiko Carstens 			return -EFAULT;
1960d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1961d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1962d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1963b0c632dbSHeiko Carstens 			return -EFAULT;
1964d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1965d0bce605SHeiko Carstens 	}
1966d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1967d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1968d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1969d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1970d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1971d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1972fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1973d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1974fda902cbSMichael Mueller 			      &px, 4);
1975d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1976d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1977d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1978d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1979d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1980d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1981d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1982178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1983d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1984d0bce605SHeiko Carstens 			      &clkcomp, 8);
1985d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1986d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1987d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1988d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1989d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1990b0c632dbSHeiko Carstens }
1991b0c632dbSHeiko Carstens 
1992e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1993e879892cSThomas Huth {
1994e879892cSThomas Huth 	/*
1995e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1996e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1997e879892cSThomas Huth 	 * them into the save area.
1998e879892cSThomas Huth 	 */
1999e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2000e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2001e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2002e879892cSThomas Huth 
2003e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2004e879892cSThomas Huth }
2005e879892cSThomas Huth 
20068ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20078ad35755SDavid Hildenbrand {
20088ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
20098ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
20108ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20118ad35755SDavid Hildenbrand }
20128ad35755SDavid Hildenbrand 
20138ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
20148ad35755SDavid Hildenbrand {
20158ad35755SDavid Hildenbrand 	unsigned int i;
20168ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
20178ad35755SDavid Hildenbrand 
20188ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
20198ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
20208ad35755SDavid Hildenbrand 	}
20218ad35755SDavid Hildenbrand }
20228ad35755SDavid Hildenbrand 
20238ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20248ad35755SDavid Hildenbrand {
20258ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
20268ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
20278ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20288ad35755SDavid Hildenbrand }
20298ad35755SDavid Hildenbrand 
20306852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
20316852d7b6SDavid Hildenbrand {
20328ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
20338ad35755SDavid Hildenbrand 
20348ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
20358ad35755SDavid Hildenbrand 		return;
20368ad35755SDavid Hildenbrand 
20376852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
20388ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2039433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
20408ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
20418ad35755SDavid Hildenbrand 
20428ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
20438ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
20448ad35755SDavid Hildenbrand 			started_vcpus++;
20458ad35755SDavid Hildenbrand 	}
20468ad35755SDavid Hildenbrand 
20478ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
20488ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
20498ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
20508ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
20518ad35755SDavid Hildenbrand 		/*
20528ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
20538ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
20548ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
20558ad35755SDavid Hildenbrand 		 */
20568ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
20578ad35755SDavid Hildenbrand 	}
20588ad35755SDavid Hildenbrand 
20596852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
20608ad35755SDavid Hildenbrand 	/*
20618ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
20628ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
20638ad35755SDavid Hildenbrand 	 */
2064d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2065433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
20668ad35755SDavid Hildenbrand 	return;
20676852d7b6SDavid Hildenbrand }
20686852d7b6SDavid Hildenbrand 
20696852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
20706852d7b6SDavid Hildenbrand {
20718ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
20728ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
20738ad35755SDavid Hildenbrand 
20748ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
20758ad35755SDavid Hildenbrand 		return;
20768ad35755SDavid Hildenbrand 
20776852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
20788ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2079433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
20808ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
20818ad35755SDavid Hildenbrand 
208232f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
20836cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
208432f5ff63SDavid Hildenbrand 
20856cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
20868ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
20878ad35755SDavid Hildenbrand 
20888ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
20898ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
20908ad35755SDavid Hildenbrand 			started_vcpus++;
20918ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
20928ad35755SDavid Hildenbrand 		}
20938ad35755SDavid Hildenbrand 	}
20948ad35755SDavid Hildenbrand 
20958ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
20968ad35755SDavid Hildenbrand 		/*
20978ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
20988ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
20998ad35755SDavid Hildenbrand 		 */
21008ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
21018ad35755SDavid Hildenbrand 	}
21028ad35755SDavid Hildenbrand 
2103433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21048ad35755SDavid Hildenbrand 	return;
21056852d7b6SDavid Hildenbrand }
21066852d7b6SDavid Hildenbrand 
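/*
 * Handle the KVM_ENABLE_CAP ioctl on a VCPU fd. No flags are defined,
 * so cap->flags must be zero. The only capability handled here is
 * KVM_CAP_S390_CSS_SUPPORT, which is recorded VM-wide in kvm->arch and
 * traced only on the first transition.
 *
 * Minimal user-space sketch (illustrative only; vcpu_fd is assumed to
 * be an open VCPU file descriptor):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */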
2107d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2108d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2109d6712df9SCornelia Huck {
2110d6712df9SCornelia Huck 	int r;
2111d6712df9SCornelia Huck 
2112d6712df9SCornelia Huck 	if (cap->flags)
2113d6712df9SCornelia Huck 		return -EINVAL;
2114d6712df9SCornelia Huck 
2115d6712df9SCornelia Huck 	switch (cap->cap) {
2116fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2117fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2118fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2119fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2120fa6b7fe9SCornelia Huck 		}
2121fa6b7fe9SCornelia Huck 		r = 0;
2122fa6b7fe9SCornelia Huck 		break;
2123d6712df9SCornelia Huck 	default:
2124d6712df9SCornelia Huck 		r = -EINVAL;
2125d6712df9SCornelia Huck 		break;
2126d6712df9SCornelia Huck 	}
2127d6712df9SCornelia Huck 	return r;
2128d6712df9SCornelia Huck }
2129d6712df9SCornelia Huck 
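/*
 * Dispatcher for the architecture-specific VCPU ioctls: interrupt
 * injection, status store, initial PSW/reset, ONE_REG access, ucontrol
 * address space mapping, fault resolution and capability enabling.
 * Unknown ioctls are rejected with -ENOTTY.
 */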
2130b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2131b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2132b0c632dbSHeiko Carstens {
2133b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2134b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2135800c1065SThomas Huth 	int idx;
2136bc923cc9SAvi Kivity 	long r;
2137b0c632dbSHeiko Carstens 
213893736624SAvi Kivity 	switch (ioctl) {
213993736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2140ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2141383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2142ba5c1e9bSCarsten Otte 
214393736624SAvi Kivity 		r = -EFAULT;
2144ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
214593736624SAvi Kivity 			break;
2146383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2147383d0b05SJens Freimann 			return -EINVAL;
2148383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
214993736624SAvi Kivity 		break;
2150ba5c1e9bSCarsten Otte 	}
2151b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2152800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2153bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2154800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2155bc923cc9SAvi Kivity 		break;
2156b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2157b0c632dbSHeiko Carstens 		psw_t psw;
2158b0c632dbSHeiko Carstens 
2159bc923cc9SAvi Kivity 		r = -EFAULT;
2160b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2161bc923cc9SAvi Kivity 			break;
2162bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2163bc923cc9SAvi Kivity 		break;
2164b0c632dbSHeiko Carstens 	}
2165b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2166bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2167bc923cc9SAvi Kivity 		break;
216814eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
216914eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
217014eebd91SCarsten Otte 		struct kvm_one_reg reg;
217114eebd91SCarsten Otte 		r = -EFAULT;
217214eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
217314eebd91SCarsten Otte 			break;
217414eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
217514eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
217614eebd91SCarsten Otte 		else
217714eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
217814eebd91SCarsten Otte 		break;
217914eebd91SCarsten Otte 	}
218027e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
218127e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
218227e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
218327e0393fSCarsten Otte 
218427e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
218527e0393fSCarsten Otte 			r = -EFAULT;
218627e0393fSCarsten Otte 			break;
218727e0393fSCarsten Otte 		}
218827e0393fSCarsten Otte 
218927e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
219027e0393fSCarsten Otte 			r = -EINVAL;
219127e0393fSCarsten Otte 			break;
219227e0393fSCarsten Otte 		}
219327e0393fSCarsten Otte 
219427e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
219527e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
219627e0393fSCarsten Otte 		break;
219727e0393fSCarsten Otte 	}
219827e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
219927e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
220027e0393fSCarsten Otte 
220127e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
220227e0393fSCarsten Otte 			r = -EFAULT;
220327e0393fSCarsten Otte 			break;
220427e0393fSCarsten Otte 		}
220527e0393fSCarsten Otte 
220627e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
220727e0393fSCarsten Otte 			r = -EINVAL;
220827e0393fSCarsten Otte 			break;
220927e0393fSCarsten Otte 		}
221027e0393fSCarsten Otte 
221127e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
221227e0393fSCarsten Otte 			ucasmap.length);
221327e0393fSCarsten Otte 		break;
221427e0393fSCarsten Otte 	}
221527e0393fSCarsten Otte #endif
2216ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2217527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2218ccc7910fSCarsten Otte 		break;
2219ccc7910fSCarsten Otte 	}
2220d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2221d6712df9SCornelia Huck 	{
2222d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2223d6712df9SCornelia Huck 		r = -EFAULT;
2224d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2225d6712df9SCornelia Huck 			break;
2226d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2227d6712df9SCornelia Huck 		break;
2228d6712df9SCornelia Huck 	}
2229b0c632dbSHeiko Carstens 	default:
22303e6afcf1SCarsten Otte 		r = -ENOTTY;
2231b0c632dbSHeiko Carstens 	}
2232bc923cc9SAvi Kivity 	return r;
2233b0c632dbSHeiko Carstens }
2234b0c632dbSHeiko Carstens 
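/*
 * For user-controlled (ucontrol) VMs, user space can map the SIE control
 * block of a VCPU through the VCPU fd at page offset
 * KVM_S390_SIE_PAGE_OFFSET. Any other fault on the VCPU mapping is
 * answered with SIGBUS.
 *
 * Hedged user-space sketch (illustrative only, assumes 4k pages):
 *
 *	void *sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 */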
22355b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
22365b1c1493SCarsten Otte {
22375b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
22385b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
22395b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
22405b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
22415b1c1493SCarsten Otte 		get_page(vmf->page);
22425b1c1493SCarsten Otte 		return 0;
22435b1c1493SCarsten Otte 	}
22445b1c1493SCarsten Otte #endif
22455b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
22465b1c1493SCarsten Otte }
22475b1c1493SCarsten Otte 
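/*
 * s390 keeps no per-memslot metadata of its own, so there is nothing to
 * allocate when a memory slot is created.
 */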
22485587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
22495587027cSAneesh Kumar K.V 			    unsigned long npages)
2250db3fe4ebSTakuya Yoshikawa {
2251db3fe4ebSTakuya Yoshikawa 	return 0;
2252db3fe4ebSTakuya Yoshikawa }
2253db3fe4ebSTakuya Yoshikawa 
2254b0c632dbSHeiko Carstens /* Section: memory related */
2255f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2256f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
22577b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
22587b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2259b0c632dbSHeiko Carstens {
2260dd2887e7SNick Wang 	/* A few sanity checks. Memory slots must start and end on a segment
2261dd2887e7SNick Wang 	   boundary (1MB). The backing memory in userland may be fragmented
2262dd2887e7SNick Wang 	   across several vmas. It is fine to mmap() and munmap() within this
2263dd2887e7SNick Wang 	   slot at any time after this call. */
2264b0c632dbSHeiko Carstens 
2265598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2266b0c632dbSHeiko Carstens 		return -EINVAL;
2267b0c632dbSHeiko Carstens 
2268598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2269b0c632dbSHeiko Carstens 		return -EINVAL;
2270b0c632dbSHeiko Carstens 
2271f7784b8eSMarcelo Tosatti 	return 0;
2272f7784b8eSMarcelo Tosatti }
2273f7784b8eSMarcelo Tosatti 
2274f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2275f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
22768482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
22778482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2278f7784b8eSMarcelo Tosatti {
2279f7850c92SCarsten Otte 	int rc;
2280f7784b8eSMarcelo Tosatti 
22812cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
22822cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
22832cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
22842cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
22852cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
22862cef4debSChristian Borntraeger 	 */
22872cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
22882cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
22892cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
22902cef4debSChristian Borntraeger 		return;
2291598841caSCarsten Otte 
2292598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2293598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2294598841caSCarsten Otte 	if (rc)
2295f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2296598841caSCarsten Otte 	return;
2297b0c632dbSHeiko Carstens }
2298b0c632dbSHeiko Carstens 
2299b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2300b0c632dbSHeiko Carstens {
23019d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2302b0c632dbSHeiko Carstens }
2303b0c632dbSHeiko Carstens 
2304b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2305b0c632dbSHeiko Carstens {
2306b0c632dbSHeiko Carstens 	kvm_exit();
2307b0c632dbSHeiko Carstens }
2308b0c632dbSHeiko Carstens 
2309b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2310b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2311566af940SCornelia Huck 
2312566af940SCornelia Huck /*
2313566af940SCornelia Huck  * Enable autoloading of the kvm module.
2314566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2315566af940SCornelia Huck  * since x86 takes a different approach.
2316566af940SCornelia Huck  */
2317566af940SCornelia Huck #include <linux/miscdevice.h>
2318566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2319566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2320