/* arch/s390/kvm/kvm-s390.c (revision c92ea7b9f7d256cabf7ee08a7627a5227e356dec) */
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
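
/*
 * Note (descriptive, not from the original file): the entries above are
 * consumed by the generic KVM code, which exposes each counter as a
 * read-only file under debugfs (typically /sys/kernel/debug/kvm/).
 * VCPU_STAT() records the offset of the counter inside struct kvm_vcpu so
 * the common code can sum it over all VCPUs of all VMs.
 */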

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
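
/*
 * Usage sketch (illustrative, not taken from this file): KVM_S390_ENABLE_SIE
 * is a legacy system ioctl issued on the /dev/kvm file descriptor, e.g.:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 *
 * Current userspace does not need it, since s390_enable_sie() is also called
 * from kvm_arch_init_vm() further down in this file.
 */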

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}
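
/*
 * Usage sketch (illustrative): userspace probes these capabilities with the
 * generic KVM_CHECK_EXTENSION ioctl; a return value > 0 means the capability
 * is available, and for some capabilities the value itself carries a limit:
 *
 *	int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	// max == MEM_OP_MAX_SIZE (65536) when KVM_S390_MEM_OP is supported
 */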

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
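
/*
 * Usage sketch (illustrative): this handler is reached through the generic
 * KVM_GET_DIRTY_LOG vm ioctl. The caller passes the memslot id and a buffer
 * with one bit per page of the slot:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,    (userspace-allocated buffer)
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */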

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
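
/*
 * Usage sketch (illustrative): VM-wide capabilities are turned on with the
 * KVM_ENABLE_CAP vm ioctl, typically before any VCPU is created, e.g. to let
 * userspace handle all SIGP orders itself:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */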

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.gmap->asce_end);
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
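
/*
 * Usage sketch (illustrative): the memory control attributes above are
 * reached through the KVM_SET_DEVICE_ATTR vm ioctl with group
 * KVM_S390_VM_MEM_CTRL, e.g. to cap guest memory before any VCPU exists:
 *
 *	__u64 limit = new_size_in_bytes;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64)(unsigned long)&limit,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */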

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
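
/*
 * Note (descriptive): flipping a key-wrapping attribute re-seeds or clears
 * the wrapping key mask in the shared CRYCB and then kicks every VCPU out of
 * SIE via exit_sie(), so the updated crypto control block settings are
 * picked up on the next SIE entry (kvm_s390_vcpu_crypto_setup() is defined
 * later in this file).
 */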

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
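
/*
 * Usage sketch (illustrative): the guest TOD clock is read and written via
 * the KVM_GET_DEVICE_ATTR / KVM_SET_DEVICE_ATTR vm ioctls with group
 * KVM_S390_VM_TOD; KVM_S390_VM_TOD_LOW carries the 64-bit TOD value and
 * KVM_S390_VM_TOD_HIGH the (must-be-zero here) epoch extension:
 *
 *	__u64 tod;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)(unsigned long)&tod,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */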

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
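
/*
 * Usage sketch (illustrative): storage keys are saved and restored (e.g. for
 * migration) with the KVM_S390_GET_SKEYS / KVM_S390_SET_SKEYS vm ioctls; the
 * fields of struct kvm_s390_skeys used here are start_gfn, count,
 * skeydata_addr and flags:
 *
 *	__u8 keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (__u64)(unsigned long)keys,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *	// r == KVM_S390_GET_SKEYS_NONE means the guest never used skeys
 */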

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
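
/*
 * Note (descriptive, my reading of the asm above): PQAP expects the QCI
 * function code in general register 0 and the address of the 128-byte result
 * buffer in general register 2, which is why both are loaded explicitly and
 * listed as clobbers. The condition code is extracted with ipm/srl and
 * returned; the EX_TABLE fixup is meant to keep a program check (e.g. AP
 * instructions not installed) from crashing the host.
 */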

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
10595102ee87STony Krowiak 
1060e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1061b0c632dbSHeiko Carstens {
10629d8d5786SMichael Mueller 	int i, rc;
1063b0c632dbSHeiko Carstens 	char debug_name[16];
1064f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1065b0c632dbSHeiko Carstens 
1066e08b9637SCarsten Otte 	rc = -EINVAL;
1067e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1068e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1069e08b9637SCarsten Otte 		goto out_err;
1070e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1071e08b9637SCarsten Otte 		goto out_err;
1072e08b9637SCarsten Otte #else
1073e08b9637SCarsten Otte 	if (type)
1074e08b9637SCarsten Otte 		goto out_err;
1075e08b9637SCarsten Otte #endif
1076e08b9637SCarsten Otte 
1077b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1078b0c632dbSHeiko Carstens 	if (rc)
1079d89f5effSJan Kiszka 		goto out_err;
1080b0c632dbSHeiko Carstens 
1081b290411aSCarsten Otte 	rc = -ENOMEM;
1082b290411aSCarsten Otte 
1083b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
1084b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1085d89f5effSJan Kiszka 		goto out_err;
1086f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1087f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
1088f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
1089f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1090b0c632dbSHeiko Carstens 
1091b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1092b0c632dbSHeiko Carstens 
10931cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1094b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
109540f5b735SDominik Dingel 		goto out_err;
1096b0c632dbSHeiko Carstens 
10979d8d5786SMichael Mueller 	/*
10989d8d5786SMichael Mueller 	 * The architectural maximum number of facility bits is 16 kbit. To
10999d8d5786SMichael Mueller 	 * store this amount, 2 kbyte of memory is required. Thus we need a
1100981467c9SMichael Mueller 	 * full page to hold the guest facility list (arch.model.fac->list)
1101981467c9SMichael Mueller 	 * and the facility mask (arch.model.fac->mask). The address must fit
11029d8d5786SMichael Mueller 	 * into 31 bits and be word aligned.
11039d8d5786SMichael Mueller 	 */
11049d8d5786SMichael Mueller 	kvm->arch.model.fac =
1105981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
11069d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
110740f5b735SDominik Dingel 		goto out_err;
11089d8d5786SMichael Mueller 
1109fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1110981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
111194422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
11129d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
11139d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1114981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
11159d8d5786SMichael Mueller 		else
1116981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
11179d8d5786SMichael Mueller 	}
11189d8d5786SMichael Mueller 
1119981467c9SMichael Mueller 	/* Populate the facility list initially. */
1120981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1121981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1122981467c9SMichael Mueller 
11239d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
112437c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
11259d8d5786SMichael Mueller 
11265102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
112740f5b735SDominik Dingel 		goto out_err;
11285102ee87STony Krowiak 
1129ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
11306d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
11316d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
11328a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1133a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1134ba5c1e9bSCarsten Otte 
1135b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
113678f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1137b0c632dbSHeiko Carstens 
1138e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1139e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1140e08b9637SCarsten Otte 	} else {
11410349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
1142598841caSCarsten Otte 		if (!kvm->arch.gmap)
114340f5b735SDominik Dingel 			goto out_err;
11442c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
114524eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1146e08b9637SCarsten Otte 	}
1147fa6b7fe9SCornelia Huck 
1148fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
114984223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
115072f25020SJason J. Herne 	kvm->arch.epoch = 0;
1151fa6b7fe9SCornelia Huck 
11528ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
115378f26131SChristian Borntraeger 	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
11548ad35755SDavid Hildenbrand 
1155d89f5effSJan Kiszka 	return 0;
1156d89f5effSJan Kiszka out_err:
115740f5b735SDominik Dingel 	kfree(kvm->arch.crypto.crycb);
115840f5b735SDominik Dingel 	free_page((unsigned long)kvm->arch.model.fac);
115940f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
116040f5b735SDominik Dingel 	free_page((unsigned long)(kvm->arch.sca));
116178f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1162d89f5effSJan Kiszka 	return rc;
1163b0c632dbSHeiko Carstens }
1164b0c632dbSHeiko Carstens 
1165d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1166d329c035SChristian Borntraeger {
1167d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1168ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
116967335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
11703c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
117158f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
117258f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
117358f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
1174abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
1175abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
1176abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
117758f9460bSCarsten Otte 	}
1178abf4a71eSCarsten Otte 	smp_mb();
117927e0393fSCarsten Otte 
118027e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
118127e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
118227e0393fSCarsten Otte 
1183e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1184b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1185d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1186b31288faSKonstantin Weitz 
11876692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1188b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1189d329c035SChristian Borntraeger }
1190d329c035SChristian Borntraeger 
1191d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1192d329c035SChristian Borntraeger {
1193d329c035SChristian Borntraeger 	unsigned int i;
1194988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1195d329c035SChristian Borntraeger 
1196988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1197988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1198988a2caeSGleb Natapov 
1199988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1200988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1201d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1202988a2caeSGleb Natapov 
1203988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1204988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1205d329c035SChristian Borntraeger }
1206d329c035SChristian Borntraeger 
1207b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1208b0c632dbSHeiko Carstens {
1209d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
12109d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1211b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1212d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
12135102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
121427e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1215598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1216841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
121767335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
121878f26131SChristian Borntraeger 	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
1219b0c632dbSHeiko Carstens }
1220b0c632dbSHeiko Carstens 
1221b0c632dbSHeiko Carstens /* Section: vcpu related */
1222dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1223b0c632dbSHeiko Carstens {
1224c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
122527e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
122627e0393fSCarsten Otte 		return -ENOMEM;
12272c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1228dafd032aSDominik Dingel 
122927e0393fSCarsten Otte 	return 0;
123027e0393fSCarsten Otte }
123127e0393fSCarsten Otte 
1232dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1233dafd032aSDominik Dingel {
1234dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1235dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
123659674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
123759674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
12389eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1239b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1240b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1241b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
124268c55750SEric Farman 	if (test_kvm_facility(vcpu->kvm, 129))
124368c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1244dafd032aSDominik Dingel 
1245dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1246dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1247dafd032aSDominik Dingel 
1248b0c632dbSHeiko Carstens 	return 0;
1249b0c632dbSHeiko Carstens }
1250b0c632dbSHeiko Carstens 
1251b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1252b0c632dbSHeiko Carstens {
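	/*
	 * Facility 129 is the vector (SIMD) facility: if the guest may use
	 * vector registers, save/restore the full vector state instead of
	 * only the floating point registers.
	 */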
12534725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
125418280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129))
125568c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
125668c55750SEric Farman 	else
12574725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1258b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
125918280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
126068c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
126168c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
126268c55750SEric Farman 	} else {
12634725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
12644725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
126568c55750SEric Farman 	}
126659674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1267480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
12689e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1269b0c632dbSHeiko Carstens }
1270b0c632dbSHeiko Carstens 
1271b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1272b0c632dbSHeiko Carstens {
12739e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1274480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
127518280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
127668c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
127768c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
127868c55750SEric Farman 	} else {
12794725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
12804725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
128168c55750SEric Farman 	}
128259674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
12834725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
128418280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129))
128568c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
128668c55750SEric Farman 	else
12874725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1288b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1289b0c632dbSHeiko Carstens }
1290b0c632dbSHeiko Carstens 
1291b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1292b0c632dbSHeiko Carstens {
1293b0c632dbSHeiko Carstens 	/* this equals the initial cpu reset in the POP, but we don't switch to ESA */
1294b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1295b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
12968d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1297b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1298b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1299b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1300b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1301b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1302b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1303b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1304b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1305b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1306672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
13073c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
13083c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
13096352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
13106852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
13112ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1312b0c632dbSHeiko Carstens }
1313b0c632dbSHeiko Carstens 
131431928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
131542897d86SMarcelo Tosatti {
131672f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
131772f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
131872f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1319dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1320dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
132142897d86SMarcelo Tosatti }
132242897d86SMarcelo Tosatti 
13235102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
13245102ee87STony Krowiak {
13259d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
13265102ee87STony Krowiak 		return;
13275102ee87STony Krowiak 
1328a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1329a374e892STony Krowiak 
1330a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1331a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1332a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1333a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1334a374e892STony Krowiak 
13355102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
13365102ee87STony Krowiak }
13375102ee87STony Krowiak 
1338b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1339b31605c1SDominik Dingel {
1340b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1341b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1342b31605c1SDominik Dingel }
1343b31605c1SDominik Dingel 
1344b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1345b31605c1SDominik Dingel {
1346b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1347b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1348b31605c1SDominik Dingel 		return -ENOMEM;
1349b31605c1SDominik Dingel 
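	/*
	 * The 0x80 bit in ecb2 presumably enables CMMA (ESSA) interpretation
	 * by the SIE, while the 0x08 bit cleared below seems to control PFMF
	 * interpretation, which is not used together with CMMA here.
	 */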
1350b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1351b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1352b31605c1SDominik Dingel 	return 0;
1353b31605c1SDominik Dingel }
1354b31605c1SDominik Dingel 
135591520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
135691520f1aSMichael Mueller {
135791520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
135891520f1aSMichael Mueller 
135991520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
136091520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
136191520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
136291520f1aSMichael Mueller }
136391520f1aSMichael Mueller 
1364b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1365b0c632dbSHeiko Carstens {
1366b31605c1SDominik Dingel 	int rc = 0;
1367b31288faSKonstantin Weitz 
13689e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
13699e6dabefSCornelia Huck 						    CPUSTAT_SM |
1370a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
1371a4a4f191SGuenther Hutzl 
137253df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
137353df84f8SGuenther Hutzl 		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
137453df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
1375a4a4f191SGuenther Hutzl 		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1376a4a4f191SGuenther Hutzl 
137791520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
137891520f1aSMichael Mueller 
1379fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
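	/*
	 * Facilities 50 and 73 are the constrained and regular transactional-
	 * execution facilities; the 0x10 bit presumably lets the SIE
	 * interpret transactional execution for the guest.
	 */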
13809d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
13817feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
13827feb6bb8SMichael Mueller 
138369d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1384ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
138537c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
1386217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
138737c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
1388ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
138918280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
139013211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
139113211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
139213211ea7SEric Farman 	}
1393492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
13945a5e6536SMatthew Rosato 
1395e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
1396b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1397b31605c1SDominik Dingel 		if (rc)
1398b31605c1SDominik Dingel 			return rc;
1399b31288faSKonstantin Weitz 	}
14000ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1401ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
14029d8d5786SMichael Mueller 
14035102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
14045102ee87STony Krowiak 
1405b31605c1SDominik Dingel 	return rc;
1406b0c632dbSHeiko Carstens }
1407b0c632dbSHeiko Carstens 
1408b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1409b0c632dbSHeiko Carstens 				      unsigned int id)
1410b0c632dbSHeiko Carstens {
14114d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
14127feb6bb8SMichael Mueller 	struct sie_page *sie_page;
14134d47555aSCarsten Otte 	int rc = -EINVAL;
1414b0c632dbSHeiko Carstens 
14154d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
14164d47555aSCarsten Otte 		goto out;
14174d47555aSCarsten Otte 
14184d47555aSCarsten Otte 	rc = -ENOMEM;
14194d47555aSCarsten Otte 
1420b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1421b0c632dbSHeiko Carstens 	if (!vcpu)
14224d47555aSCarsten Otte 		goto out;
1423b0c632dbSHeiko Carstens 
14247feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
14257feb6bb8SMichael Mueller 	if (!sie_page)
1426b0c632dbSHeiko Carstens 		goto out_free_cpu;
1427b0c632dbSHeiko Carstens 
14287feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
14297feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
143068c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
14317feb6bb8SMichael Mueller 
1432b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
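	/*
	 * For non-ucontrol guests, register the vcpu in the system control
	 * area (SCA): store the SIE block address in the per-cpu SCA entry,
	 * point the SIE block at the SCA and mark the cpu in the mcn mask.
	 */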
143358f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
143458f9460bSCarsten Otte 		if (!kvm->arch.sca) {
143558f9460bSCarsten Otte 			WARN_ON_ONCE(1);
143658f9460bSCarsten Otte 			goto out_free_cpu;
143758f9460bSCarsten Otte 		}
1438abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
143958f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
144058f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
144158f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
144258f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1443b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1444fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
144558f9460bSCarsten Otte 	}
1446b0c632dbSHeiko Carstens 
1447ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1448ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1449d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
14505288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1451ba5c1e9bSCarsten Otte 
1452b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1453b0c632dbSHeiko Carstens 	if (rc)
14547b06bf2fSWei Yongjun 		goto out_free_sie_block;
1455b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1456b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1457ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1458b0c632dbSHeiko Carstens 
1459b0c632dbSHeiko Carstens 	return vcpu;
14607b06bf2fSWei Yongjun out_free_sie_block:
14617b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1462b0c632dbSHeiko Carstens out_free_cpu:
1463b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
14644d47555aSCarsten Otte out:
1465b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1466b0c632dbSHeiko Carstens }
1467b0c632dbSHeiko Carstens 
1468b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1469b0c632dbSHeiko Carstens {
14709a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1471b0c632dbSHeiko Carstens }
1472b0c632dbSHeiko Carstens 
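/*
 * Setting PROG_BLOCK_SIE or PROG_REQUEST in prog20 and then kicking the
 * vcpu out of SIE (exit_sie) presumably keeps the low-level SIE entry code
 * from re-entering SIE until the flag is cleared again.
 */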
147327406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
147449b99e1eSChristian Borntraeger {
147549b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
147661a6df54SDavid Hildenbrand 	exit_sie(vcpu);
147749b99e1eSChristian Borntraeger }
147849b99e1eSChristian Borntraeger 
147927406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
148049b99e1eSChristian Borntraeger {
148149b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
148249b99e1eSChristian Borntraeger }
148349b99e1eSChristian Borntraeger 
14848e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
14858e236546SChristian Borntraeger {
14868e236546SChristian Borntraeger 	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
148761a6df54SDavid Hildenbrand 	exit_sie(vcpu);
14888e236546SChristian Borntraeger }
14898e236546SChristian Borntraeger 
14908e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
14918e236546SChristian Borntraeger {
14928e236546SChristian Borntraeger 	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
14938e236546SChristian Borntraeger }
14948e236546SChristian Borntraeger 
149549b99e1eSChristian Borntraeger /*
149649b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
149749b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
149849b99e1eSChristian Borntraeger  * return immediately. */
149949b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
150049b99e1eSChristian Borntraeger {
150149b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
150249b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
150349b99e1eSChristian Borntraeger 		cpu_relax();
150449b99e1eSChristian Borntraeger }
150549b99e1eSChristian Borntraeger 
15068e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
15078e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
150849b99e1eSChristian Borntraeger {
15098e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
15108e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
151149b99e1eSChristian Borntraeger }
151249b99e1eSChristian Borntraeger 
15132c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
15142c70fe44SChristian Borntraeger {
15152c70fe44SChristian Borntraeger 	int i;
15162c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
15172c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
15182c70fe44SChristian Borntraeger 
15192c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
15202c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1521fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
15222c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
15238e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
15242c70fe44SChristian Borntraeger 		}
15252c70fe44SChristian Borntraeger 	}
15262c70fe44SChristian Borntraeger }
15272c70fe44SChristian Borntraeger 
1528b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1529b6d33834SChristoffer Dall {
1530b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1531b6d33834SChristoffer Dall 	BUG();
1532b6d33834SChristoffer Dall 	return 0;
1533b6d33834SChristoffer Dall }
1534b6d33834SChristoffer Dall 
153514eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
153614eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
153714eebd91SCarsten Otte {
153814eebd91SCarsten Otte 	int r = -EINVAL;
153914eebd91SCarsten Otte 
154014eebd91SCarsten Otte 	switch (reg->id) {
154129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
154229b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
154329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
154429b7c71bSCarsten Otte 		break;
154529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
154629b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
154729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
154829b7c71bSCarsten Otte 		break;
154946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
155046a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
155146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
155246a6dd1cSJason J. herne 		break;
155346a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
155446a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
155546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
155646a6dd1cSJason J. herne 		break;
1557536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1558536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1559536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1560536336c2SDominik Dingel 		break;
1561536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1562536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1563536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1564536336c2SDominik Dingel 		break;
1565536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1566536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1567536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1568536336c2SDominik Dingel 		break;
1569672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1570672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1571672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1572672550fbSChristian Borntraeger 		break;
1573afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1574afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1575afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1576afa45ff5SChristian Borntraeger 		break;
157714eebd91SCarsten Otte 	default:
157814eebd91SCarsten Otte 		break;
157914eebd91SCarsten Otte 	}
158014eebd91SCarsten Otte 
158114eebd91SCarsten Otte 	return r;
158214eebd91SCarsten Otte }
158314eebd91SCarsten Otte 
158414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
158514eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
158614eebd91SCarsten Otte {
158714eebd91SCarsten Otte 	int r = -EINVAL;
158814eebd91SCarsten Otte 
158914eebd91SCarsten Otte 	switch (reg->id) {
159029b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
159129b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
159229b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
159329b7c71bSCarsten Otte 		break;
159429b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
159529b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
159629b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
159729b7c71bSCarsten Otte 		break;
159846a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
159946a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
160046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
160146a6dd1cSJason J. herne 		break;
160246a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
160346a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
160446a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
160546a6dd1cSJason J. herne 		break;
1606536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1607536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1608536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
16099fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
16109fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1611536336c2SDominik Dingel 		break;
1612536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1613536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1614536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1615536336c2SDominik Dingel 		break;
1616536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1617536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1618536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1619536336c2SDominik Dingel 		break;
1620672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1621672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1622672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1623672550fbSChristian Borntraeger 		break;
1624afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1625afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1626afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1627afa45ff5SChristian Borntraeger 		break;
162814eebd91SCarsten Otte 	default:
162914eebd91SCarsten Otte 		break;
163014eebd91SCarsten Otte 	}
163114eebd91SCarsten Otte 
163214eebd91SCarsten Otte 	return r;
163314eebd91SCarsten Otte }
1634b6d33834SChristoffer Dall 
1635b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1636b0c632dbSHeiko Carstens {
1637b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1638b0c632dbSHeiko Carstens 	return 0;
1639b0c632dbSHeiko Carstens }
1640b0c632dbSHeiko Carstens 
1641b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1642b0c632dbSHeiko Carstens {
16435a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1644b0c632dbSHeiko Carstens 	return 0;
1645b0c632dbSHeiko Carstens }
1646b0c632dbSHeiko Carstens 
1647b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1648b0c632dbSHeiko Carstens {
16495a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1650b0c632dbSHeiko Carstens 	return 0;
1651b0c632dbSHeiko Carstens }
1652b0c632dbSHeiko Carstens 
1653b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1654b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1655b0c632dbSHeiko Carstens {
165659674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1657b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
165859674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1659b0c632dbSHeiko Carstens 	return 0;
1660b0c632dbSHeiko Carstens }
1661b0c632dbSHeiko Carstens 
1662b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1663b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1664b0c632dbSHeiko Carstens {
166559674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1666b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1667b0c632dbSHeiko Carstens 	return 0;
1668b0c632dbSHeiko Carstens }
1669b0c632dbSHeiko Carstens 
1670b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1671b0c632dbSHeiko Carstens {
16724725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
16734725c860SMartin Schwidefsky 		return -EINVAL;
1674b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
16754725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
16764725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
16774725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1678b0c632dbSHeiko Carstens 	return 0;
1679b0c632dbSHeiko Carstens }
1680b0c632dbSHeiko Carstens 
1681b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1682b0c632dbSHeiko Carstens {
1683b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1684b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1685b0c632dbSHeiko Carstens 	return 0;
1686b0c632dbSHeiko Carstens }
1687b0c632dbSHeiko Carstens 
1688b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1689b0c632dbSHeiko Carstens {
1690b0c632dbSHeiko Carstens 	int rc = 0;
1691b0c632dbSHeiko Carstens 
16927a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1693b0c632dbSHeiko Carstens 		rc = -EBUSY;
1694d7b0b5ebSCarsten Otte 	else {
1695d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1696d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1697d7b0b5ebSCarsten Otte 	}
1698b0c632dbSHeiko Carstens 	return rc;
1699b0c632dbSHeiko Carstens }
1700b0c632dbSHeiko Carstens 
1701b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1702b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1703b0c632dbSHeiko Carstens {
1704b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1705b0c632dbSHeiko Carstens }
1706b0c632dbSHeiko Carstens 
170727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
170827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
170927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
171027291e21SDavid Hildenbrand 
1711d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1712d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1713b0c632dbSHeiko Carstens {
171427291e21SDavid Hildenbrand 	int rc = 0;
171527291e21SDavid Hildenbrand 
171627291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
171727291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
171827291e21SDavid Hildenbrand 
17192de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
172027291e21SDavid Hildenbrand 		return -EINVAL;
172127291e21SDavid Hildenbrand 
172227291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
172327291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
172427291e21SDavid Hildenbrand 		/* enforce guest PER */
172527291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
172627291e21SDavid Hildenbrand 
172727291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
172827291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
172927291e21SDavid Hildenbrand 	} else {
173027291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
173127291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
173227291e21SDavid Hildenbrand 	}
173327291e21SDavid Hildenbrand 
173427291e21SDavid Hildenbrand 	if (rc) {
173527291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
173627291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
173727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
173827291e21SDavid Hildenbrand 	}
173927291e21SDavid Hildenbrand 
174027291e21SDavid Hildenbrand 	return rc;
1741b0c632dbSHeiko Carstens }
1742b0c632dbSHeiko Carstens 
174362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
174462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
174562d9f0dbSMarcelo Tosatti {
17466352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
17476352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
17486352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
174962d9f0dbSMarcelo Tosatti }
175062d9f0dbSMarcelo Tosatti 
175162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
175262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
175362d9f0dbSMarcelo Tosatti {
17546352e4d2SDavid Hildenbrand 	int rc = 0;
17556352e4d2SDavid Hildenbrand 
17566352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
17576352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
17586352e4d2SDavid Hildenbrand 
17596352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
17606352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
17616352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
17626352e4d2SDavid Hildenbrand 		break;
17636352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
17646352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
17656352e4d2SDavid Hildenbrand 		break;
17666352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
17676352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
17686352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
17696352e4d2SDavid Hildenbrand 	default:
17706352e4d2SDavid Hildenbrand 		rc = -ENXIO;
17716352e4d2SDavid Hildenbrand 	}
17726352e4d2SDavid Hildenbrand 
17736352e4d2SDavid Hildenbrand 	return rc;
177462d9f0dbSMarcelo Tosatti }
177562d9f0dbSMarcelo Tosatti 
17768ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
17778ad35755SDavid Hildenbrand {
17788ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
17798ad35755SDavid Hildenbrand }
17808ad35755SDavid Hildenbrand 
17812c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
17822c70fe44SChristian Borntraeger {
1783785dbef4SChristian Borntraeger 	if (!vcpu->requests)
1784785dbef4SChristian Borntraeger 		return 0;
17858ad35755SDavid Hildenbrand retry:
17868e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
17872c70fe44SChristian Borntraeger 	/*
17882c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
17892c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
17902c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
17912c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
17922c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
17932c70fe44SChristian Borntraeger 	 */
17948ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
17952c70fe44SChristian Borntraeger 		int rc;
17962c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1797fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
17982c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
17992c70fe44SChristian Borntraeger 		if (rc)
18002c70fe44SChristian Borntraeger 			return rc;
18018ad35755SDavid Hildenbrand 		goto retry;
18022c70fe44SChristian Borntraeger 	}
18038ad35755SDavid Hildenbrand 
1804d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1805d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1806d3d692c8SDavid Hildenbrand 		goto retry;
1807d3d692c8SDavid Hildenbrand 	}
1808d3d692c8SDavid Hildenbrand 
18098ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
18108ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
18118ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
18128ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
18138ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
18148ad35755SDavid Hildenbrand 		}
18158ad35755SDavid Hildenbrand 		goto retry;
18168ad35755SDavid Hildenbrand 	}
18178ad35755SDavid Hildenbrand 
18188ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
18198ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
18208ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
18218ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
18228ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
18238ad35755SDavid Hildenbrand 		}
18248ad35755SDavid Hildenbrand 		goto retry;
18258ad35755SDavid Hildenbrand 	}
18268ad35755SDavid Hildenbrand 
18270759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
18280759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
18290759d068SDavid Hildenbrand 
18302c70fe44SChristian Borntraeger 	return 0;
18312c70fe44SChristian Borntraeger }
18322c70fe44SChristian Borntraeger 
1833fa576c58SThomas Huth /**
1834fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1835fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1836fa576c58SThomas Huth  * @gpa: Guest physical address
1837fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1838fa576c58SThomas Huth  *
1839fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1840fa576c58SThomas Huth  *
1841fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1842fa576c58SThomas Huth  */
1843fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
184424eb3a82SDominik Dingel {
1845527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1846527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
184724eb3a82SDominik Dingel }
184824eb3a82SDominik Dingel 
18493c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
18503c038e6bSDominik Dingel 				      unsigned long token)
18513c038e6bSDominik Dingel {
18523c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1853383d0b05SJens Freimann 	struct kvm_s390_irq irq;
18543c038e6bSDominik Dingel 
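	/*
	 * A pfault-init notification is injected into the specific vcpu,
	 * while the pfault-done notification is delivered as a floating
	 * interrupt to the whole VM.
	 */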
18553c038e6bSDominik Dingel 	if (start_token) {
1856383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1857383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1858383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
18593c038e6bSDominik Dingel 	} else {
18603c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1861383d0b05SJens Freimann 		inti.parm64 = token;
18623c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
18633c038e6bSDominik Dingel 	}
18643c038e6bSDominik Dingel }
18653c038e6bSDominik Dingel 
18663c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
18673c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
18683c038e6bSDominik Dingel {
18693c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
18703c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
18713c038e6bSDominik Dingel }
18723c038e6bSDominik Dingel 
18733c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
18743c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
18753c038e6bSDominik Dingel {
18763c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
18773c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
18783c038e6bSDominik Dingel }
18793c038e6bSDominik Dingel 
18803c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
18813c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
18823c038e6bSDominik Dingel {
18833c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
18843c038e6bSDominik Dingel }
18853c038e6bSDominik Dingel 
18863c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
18873c038e6bSDominik Dingel {
18883c038e6bSDominik Dingel 	/*
18893c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
18903c038e6bSDominik Dingel 	 * but we still want kvm_check_async_pf_completion to clean up
18913c038e6bSDominik Dingel 	 */
18923c038e6bSDominik Dingel 	return true;
18933c038e6bSDominik Dingel }
18943c038e6bSDominik Dingel 
18953c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
18963c038e6bSDominik Dingel {
18973c038e6bSDominik Dingel 	hva_t hva;
18983c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
18993c038e6bSDominik Dingel 	int rc;
19003c038e6bSDominik Dingel 
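	/*
	 * Only set up asynchronous pfault handling if the guest has enabled
	 * it: a valid pfault token, a PSW mask matching pfault_select/
	 * pfault_compare, external interrupts enabled and, presumably, the
	 * service-signal subclass (0x200 in CR0) unmasked.
	 */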
19013c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
19023c038e6bSDominik Dingel 		return 0;
19033c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
19043c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
19053c038e6bSDominik Dingel 		return 0;
19063c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
19073c038e6bSDominik Dingel 		return 0;
19089a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
19093c038e6bSDominik Dingel 		return 0;
19103c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
19113c038e6bSDominik Dingel 		return 0;
19123c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
19133c038e6bSDominik Dingel 		return 0;
19143c038e6bSDominik Dingel 
191581480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
191681480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
191781480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
19183c038e6bSDominik Dingel 		return 0;
19193c038e6bSDominik Dingel 
19203c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
19213c038e6bSDominik Dingel 	return rc;
19223c038e6bSDominik Dingel }
19233c038e6bSDominik Dingel 
19243fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1925b0c632dbSHeiko Carstens {
19263fb4c40fSThomas Huth 	int rc, cpuflags;
1927e168bf8dSCarsten Otte 
19283c038e6bSDominik Dingel 	/*
19293c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
19303c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
19313c038e6bSDominik Dingel 	 * handled outside the worker.
19323c038e6bSDominik Dingel 	 */
19333c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
19343c038e6bSDominik Dingel 
19355a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1936b0c632dbSHeiko Carstens 
1937b0c632dbSHeiko Carstens 	if (need_resched())
1938b0c632dbSHeiko Carstens 		schedule();
1939b0c632dbSHeiko Carstens 
1940d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
194171cde587SChristian Borntraeger 		s390_handle_mcck();
194271cde587SChristian Borntraeger 
194379395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
194479395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
194579395031SJens Freimann 		if (rc)
194679395031SJens Freimann 			return rc;
194779395031SJens Freimann 	}
19480ff31867SCarsten Otte 
19492c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
19502c70fe44SChristian Borntraeger 	if (rc)
19512c70fe44SChristian Borntraeger 		return rc;
19522c70fe44SChristian Borntraeger 
195327291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
195427291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
195527291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
195627291e21SDavid Hildenbrand 	}
195727291e21SDavid Hildenbrand 
1958b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
19593fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
19603fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
19613fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
19622b29a9fdSDominik Dingel 
19633fb4c40fSThomas Huth 	return 0;
19643fb4c40fSThomas Huth }
19653fb4c40fSThomas Huth 
1966492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1967492d8642SThomas Huth {
1968492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1969492d8642SThomas Huth 	u8 opcode;
1970492d8642SThomas Huth 	int rc;
1971492d8642SThomas Huth 
1972492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1973492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
1974492d8642SThomas Huth 
1975492d8642SThomas Huth 	/*
1976492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
1977492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
1978492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
1979492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
1980492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
1981492d8642SThomas Huth 	 * to be able to forward the PSW.
1982492d8642SThomas Huth 	 */
19838ae04b8fSAlexander Yarygin 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
1984492d8642SThomas Huth 	if (rc)
1985492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
1986492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1987492d8642SThomas Huth 
1988492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1989492d8642SThomas Huth }
1990492d8642SThomas Huth 
19913fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
19923fb4c40fSThomas Huth {
199324eb3a82SDominik Dingel 	int rc = -1;
19942b29a9fdSDominik Dingel 
19952b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
19962b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
19972b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
19982b29a9fdSDominik Dingel 
199927291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
200027291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
200127291e21SDavid Hildenbrand 
20023fb4c40fSThomas Huth 	if (exit_reason >= 0) {
20037c470539SMartin Schwidefsky 		rc = 0;
2004210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2005210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2006210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2007210b1607SThomas Huth 						current->thread.gmap_addr;
2008210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
2009210b1607SThomas Huth 		rc = -EREMOTE;
201024eb3a82SDominik Dingel 
201124eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
20123c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
201324eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
2014fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
201524eb3a82SDominik Dingel 			rc = 0;
2016fa576c58SThomas Huth 		} else {
2017fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
2018fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
2019fa576c58SThomas Huth 		}
202024eb3a82SDominik Dingel 	}
202124eb3a82SDominik Dingel 
2022492d8642SThomas Huth 	if (rc == -1)
2023492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
2024b0c632dbSHeiko Carstens 
20255a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
20263fb4c40fSThomas Huth 
2027a76ccff6SThomas Huth 	if (rc == 0) {
2028a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
20292955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
20302955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
2031a76ccff6SThomas Huth 		else
2032a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
2033a76ccff6SThomas Huth 	}
2034a76ccff6SThomas Huth 
20353fb4c40fSThomas Huth 	return rc;
20363fb4c40fSThomas Huth }
20373fb4c40fSThomas Huth 
20383fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
20393fb4c40fSThomas Huth {
20403fb4c40fSThomas Huth 	int rc, exit_reason;
20413fb4c40fSThomas Huth 
2042800c1065SThomas Huth 	/*
2043800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when
2044800c1065SThomas Huth 	 * running the guest), so that memslots (and other stuff) are protected
2045800c1065SThomas Huth 	 */
2046800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2047800c1065SThomas Huth 
2048a76ccff6SThomas Huth 	do {
20493fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
20503fb4c40fSThomas Huth 		if (rc)
2051a76ccff6SThomas Huth 			break;
20523fb4c40fSThomas Huth 
2053800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
20543fb4c40fSThomas Huth 		/*
2055a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
2056a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
20573fb4c40fSThomas Huth 		 */
20580097d12eSChristian Borntraeger 		local_irq_disable();
20590097d12eSChristian Borntraeger 		__kvm_guest_enter();
20600097d12eSChristian Borntraeger 		local_irq_enable();
2061a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2062a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
20630097d12eSChristian Borntraeger 		local_irq_disable();
20640097d12eSChristian Borntraeger 		__kvm_guest_exit();
20650097d12eSChristian Borntraeger 		local_irq_enable();
2066800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
20673fb4c40fSThomas Huth 
20683fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
206927291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
20703fb4c40fSThomas Huth 
2071800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2072e168bf8dSCarsten Otte 	return rc;
2073b0c632dbSHeiko Carstens }
2074b0c632dbSHeiko Carstens 
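/*
 * Copy the register state that userspace marked dirty from kvm_run into
 * the SIE control block before entering the guest.
 */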
2075b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2076b028ee3eSDavid Hildenbrand {
2077b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2078b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2079b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2080b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2081b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2082b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2083d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2084d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2085b028ee3eSDavid Hildenbrand 	}
2086b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2087b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2088b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2089b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2090b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2091b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2092b028ee3eSDavid Hildenbrand 	}
2093b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2094b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2095b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2096b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
20979fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
20989fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2099b028ee3eSDavid Hildenbrand 	}
2100b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2101b028ee3eSDavid Hildenbrand }
2102b028ee3eSDavid Hildenbrand 
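/*
 * Mirror the current SIE register state back into kvm_run so that
 * userspace sees the guest's registers after the run.
 */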
2103b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2104b028ee3eSDavid Hildenbrand {
2105b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2106b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2107b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2108b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2109b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2110b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2111b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2112b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2113b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2114b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2115b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2116b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2117b028ee3eSDavid Hildenbrand }
2118b028ee3eSDavid Hildenbrand 
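/*
 * Handler for the KVM_RUN vcpu ioctl.  A userspace VMM typically drives it
 * in a loop roughly like this illustrative sketch (kvm_run is the structure
 * mmap()ed from the vcpu file descriptor; vcpu_fd and handle_sie_intercept()
 * are placeholders, error handling omitted):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;
 *		if (kvm_run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sie_intercept(kvm_run);
 *		...
 *	}
 */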
2119b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2120b0c632dbSHeiko Carstens {
21218f2abe6aSChristian Borntraeger 	int rc;
2122b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2123b0c632dbSHeiko Carstens 
212427291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
212527291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
212627291e21SDavid Hildenbrand 		return 0;
212727291e21SDavid Hildenbrand 	}
212827291e21SDavid Hildenbrand 
2129b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2130b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2131b0c632dbSHeiko Carstens 
21326352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
21336852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
21346352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2135ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
21366352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
21376352e4d2SDavid Hildenbrand 		return -EINVAL;
21386352e4d2SDavid Hildenbrand 	}
2139b0c632dbSHeiko Carstens 
2140b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2141d7b0b5ebSCarsten Otte 
2142dab4079dSHeiko Carstens 	might_fault();
2143e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
21449ace903dSChristian Ehrhardt 
2145b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2146b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
21478f2abe6aSChristian Borntraeger 		rc = -EINTR;
2148b1d16c49SChristian Ehrhardt 	}
21498f2abe6aSChristian Borntraeger 
215027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
215127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
215227291e21SDavid Hildenbrand 		rc = 0;
215327291e21SDavid Hildenbrand 	}
215427291e21SDavid Hildenbrand 
2155b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
21568f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
21578f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
21588f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
21598f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
21608f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
21618f2abe6aSChristian Borntraeger 		rc = 0;
21628f2abe6aSChristian Borntraeger 	}
21638f2abe6aSChristian Borntraeger 
21648f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
21658f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
21668f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
21678f2abe6aSChristian Borntraeger 		rc = 0;
21688f2abe6aSChristian Borntraeger 	}
21698f2abe6aSChristian Borntraeger 
2170b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2171d7b0b5ebSCarsten Otte 
2172b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2173b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2174b0c632dbSHeiko Carstens 
2175b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
21767e8e6ab4SHeiko Carstens 	return rc;
2177b0c632dbSHeiko Carstens }
2178b0c632dbSHeiko Carstens 
2179b0c632dbSHeiko Carstens /*
2180b0c632dbSHeiko Carstens  * store status at address
2181b0c632dbSHeiko Carstens  * we have two special cases:
2182b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2183b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2184b0c632dbSHeiko Carstens  */
2185d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2186b0c632dbSHeiko Carstens {
2187092670cdSCarsten Otte 	unsigned char archmode = 1;
2188fda902cbSMichael Mueller 	unsigned int px;
2189178bd789SThomas Huth 	u64 clkcomp;
2190d0bce605SHeiko Carstens 	int rc;
2191b0c632dbSHeiko Carstens 
2192d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2193d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2194b0c632dbSHeiko Carstens 			return -EFAULT;
2195d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
2196d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2197d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2198b0c632dbSHeiko Carstens 			return -EFAULT;
2199d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2200d0bce605SHeiko Carstens 	}
2201d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2202d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
2203d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2204d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2205d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2206d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2207fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2208d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2209fda902cbSMichael Mueller 			      &px, 4);
2210d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2211d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2212d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2213d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2214d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2215d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2216d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2217178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2218d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2219d0bce605SHeiko Carstens 			      &clkcomp, 8);
2220d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2221d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2222d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2223d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2224d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2225b0c632dbSHeiko Carstens }
2226b0c632dbSHeiko Carstens 
2227e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2228e879892cSThomas Huth {
2229e879892cSThomas Huth 	/*
2230e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2231e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2232e879892cSThomas Huth 	 * them into the save area.
2233e879892cSThomas Huth 	 */
2234e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2235e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2236e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2237e879892cSThomas Huth 
2238e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2239e879892cSThomas Huth }
2240e879892cSThomas Huth 
2241bc17de7cSEric Farman /*
2242bc17de7cSEric Farman  * store additional status (vector registers) at address
2243bc17de7cSEric Farman  */
2244bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2245bc17de7cSEric Farman 					unsigned long gpa)
2246bc17de7cSEric Farman {
2247bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2248bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2249bc17de7cSEric Farman 		return 0;
2250bc17de7cSEric Farman 
2251bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2252bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2253bc17de7cSEric Farman }
2254bc17de7cSEric Farman 
2255bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2256bc17de7cSEric Farman {
2257bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2258bc17de7cSEric Farman 		return 0;
2259bc17de7cSEric Farman 
2260bc17de7cSEric Farman 	/*
2261bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
2262bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2263bc17de7cSEric Farman 	 * them into the save area.
2264bc17de7cSEric Farman 	 */
2265bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2266bc17de7cSEric Farman 
2267bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2268bc17de7cSEric Farman }
2269bc17de7cSEric Farman 
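/*
 * IBS is only used as a "single started VCPU" optimization (see
 * kvm_s390_vcpu_start/stop below).  The helpers do not touch the SIE block
 * directly: they clear a possibly pending opposite request and queue an
 * ENABLE/DISABLE request that the target VCPU processes itself.
 */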
22708ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
22718ad35755SDavid Hildenbrand {
22728ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
22738e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
22748ad35755SDavid Hildenbrand }
22758ad35755SDavid Hildenbrand 
22768ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
22778ad35755SDavid Hildenbrand {
22788ad35755SDavid Hildenbrand 	unsigned int i;
22798ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
22808ad35755SDavid Hildenbrand 
22818ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
22828ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
22838ad35755SDavid Hildenbrand 	}
22848ad35755SDavid Hildenbrand }
22858ad35755SDavid Hildenbrand 
22868ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
22878ad35755SDavid Hildenbrand {
22888ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
22898e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
22908ad35755SDavid Hildenbrand }
22918ad35755SDavid Hildenbrand 
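/*
 * Move a VCPU from the STOPPED to the OPERATING state.  start_stop_lock
 * serializes the transition for all VCPUs of the VM, so the number of
 * already started VCPUs can be evaluated race-free when deciding whether
 * IBS has to be enabled or disabled.
 */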
22926852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
22936852d7b6SDavid Hildenbrand {
22948ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
22958ad35755SDavid Hildenbrand 
22968ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
22978ad35755SDavid Hildenbrand 		return;
22988ad35755SDavid Hildenbrand 
22996852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
23008ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2301433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
23028ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
23038ad35755SDavid Hildenbrand 
23048ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
23058ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
23068ad35755SDavid Hildenbrand 			started_vcpus++;
23078ad35755SDavid Hildenbrand 	}
23088ad35755SDavid Hildenbrand 
23098ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
23108ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
23118ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
23128ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
23138ad35755SDavid Hildenbrand 		/*
23148ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
23158ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
23168ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
23178ad35755SDavid Hildenbrand 		 */
23188ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
23198ad35755SDavid Hildenbrand 	}
23208ad35755SDavid Hildenbrand 
23216852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
23228ad35755SDavid Hildenbrand 	/*
23238ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
23248ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
23258ad35755SDavid Hildenbrand 	 */
2326d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2327433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
23288ad35755SDavid Hildenbrand 	return;
23296852d7b6SDavid Hildenbrand }
23306852d7b6SDavid Hildenbrand 
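/*
 * Move a VCPU into the STOPPED state.  Pending SIGP STOP (AND STORE STATUS)
 * orders are considered fully processed at this point; if only one started
 * VCPU remains afterwards, IBS is enabled again for that VCPU.
 */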
23316852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
23326852d7b6SDavid Hildenbrand {
23338ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
23348ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
23358ad35755SDavid Hildenbrand 
23368ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
23378ad35755SDavid Hildenbrand 		return;
23388ad35755SDavid Hildenbrand 
23396852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
23408ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2341433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
23428ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
23438ad35755SDavid Hildenbrand 
234432f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
23456cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
234632f5ff63SDavid Hildenbrand 
23476cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
23488ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
23498ad35755SDavid Hildenbrand 
23508ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
23518ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
23528ad35755SDavid Hildenbrand 			started_vcpus++;
23538ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
23548ad35755SDavid Hildenbrand 		}
23558ad35755SDavid Hildenbrand 	}
23568ad35755SDavid Hildenbrand 
23578ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
23588ad35755SDavid Hildenbrand 		/*
23598ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
23608ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
23618ad35755SDavid Hildenbrand 		 */
23628ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
23638ad35755SDavid Hildenbrand 	}
23648ad35755SDavid Hildenbrand 
2365433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
23668ad35755SDavid Hildenbrand 	return;
23676852d7b6SDavid Hildenbrand }
23686852d7b6SDavid Hildenbrand 
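/*
 * Per-vcpu KVM_ENABLE_CAP handler: only KVM_CAP_S390_CSS_SUPPORT can be
 * enabled here, everything else is rejected with -EINVAL.
 */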
2369d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2370d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2371d6712df9SCornelia Huck {
2372d6712df9SCornelia Huck 	int r;
2373d6712df9SCornelia Huck 
2374d6712df9SCornelia Huck 	if (cap->flags)
2375d6712df9SCornelia Huck 		return -EINVAL;
2376d6712df9SCornelia Huck 
2377d6712df9SCornelia Huck 	switch (cap->cap) {
2378fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2379fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2380fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2381*c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2382fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2383fa6b7fe9SCornelia Huck 		}
2384fa6b7fe9SCornelia Huck 		r = 0;
2385fa6b7fe9SCornelia Huck 		break;
2386d6712df9SCornelia Huck 	default:
2387d6712df9SCornelia Huck 		r = -EINVAL;
2388d6712df9SCornelia Huck 		break;
2389d6712df9SCornelia Huck 	}
2390d6712df9SCornelia Huck 	return r;
2391d6712df9SCornelia Huck }
2392d6712df9SCornelia Huck 
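/*
 * Back end for the KVM_S390_MEM_OP vcpu ioctl: read from or write to guest
 * logical addresses, or only check accessibility when
 * KVM_S390_MEMOP_F_CHECK_ONLY is set.  An illustrative userspace read could
 * look roughly like this (guest_addr, len, buffer and vcpu_fd are
 * placeholders, error handling omitted):
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr	= guest_addr,
 *		.size	= len,
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf	= (__u64)(unsigned long)buffer,
 *		.ar	= 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */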
239341408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
239441408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
239541408c28SThomas Huth {
239641408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
239741408c28SThomas Huth 	void *tmpbuf = NULL;
239841408c28SThomas Huth 	int r, srcu_idx;
239941408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
240041408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
240141408c28SThomas Huth 
240241408c28SThomas Huth 	if (mop->flags & ~supported_flags)
240341408c28SThomas Huth 		return -EINVAL;
240441408c28SThomas Huth 
240541408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
240641408c28SThomas Huth 		return -E2BIG;
240741408c28SThomas Huth 
240841408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
240941408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
241041408c28SThomas Huth 		if (!tmpbuf)
241141408c28SThomas Huth 			return -ENOMEM;
241241408c28SThomas Huth 	}
241341408c28SThomas Huth 
241441408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
241541408c28SThomas Huth 
241641408c28SThomas Huth 	switch (mop->op) {
241741408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
241841408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
241941408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
242041408c28SThomas Huth 			break;
242141408c28SThomas Huth 		}
242241408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
242341408c28SThomas Huth 		if (r == 0) {
242441408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
242541408c28SThomas Huth 				r = -EFAULT;
242641408c28SThomas Huth 		}
242741408c28SThomas Huth 		break;
242841408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
242941408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
243041408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
243141408c28SThomas Huth 			break;
243241408c28SThomas Huth 		}
243341408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
243441408c28SThomas Huth 			r = -EFAULT;
243541408c28SThomas Huth 			break;
243641408c28SThomas Huth 		}
243741408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
243841408c28SThomas Huth 		break;
243941408c28SThomas Huth 	default:
244041408c28SThomas Huth 		r = -EINVAL;
244141408c28SThomas Huth 	}
244241408c28SThomas Huth 
244341408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
244441408c28SThomas Huth 
244541408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
244641408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
244741408c28SThomas Huth 
244841408c28SThomas Huth 	vfree(tmpbuf);
244941408c28SThomas Huth 	return r;
245041408c28SThomas Huth }
245141408c28SThomas Huth 
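/*
 * Dispatcher for the remaining vcpu ioctls: interrupt injection, store
 * status, initial reset, one-reg access, ucontrol address space mapping,
 * guest memory operations and irq state migration.
 */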
2452b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2453b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2454b0c632dbSHeiko Carstens {
2455b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2456b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2457800c1065SThomas Huth 	int idx;
2458bc923cc9SAvi Kivity 	long r;
2459b0c632dbSHeiko Carstens 
246093736624SAvi Kivity 	switch (ioctl) {
246147b43c52SJens Freimann 	case KVM_S390_IRQ: {
246247b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
246347b43c52SJens Freimann 
246447b43c52SJens Freimann 		r = -EFAULT;
246547b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
246647b43c52SJens Freimann 			break;
246747b43c52SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
246847b43c52SJens Freimann 		break;
246947b43c52SJens Freimann 	}
247093736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2471ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2472383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2473ba5c1e9bSCarsten Otte 
247493736624SAvi Kivity 		r = -EFAULT;
2475ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
247693736624SAvi Kivity 			break;
2477383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2478383d0b05SJens Freimann 			return -EINVAL;
2479383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
248093736624SAvi Kivity 		break;
2481ba5c1e9bSCarsten Otte 	}
2482b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2483800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2484bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2485800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2486bc923cc9SAvi Kivity 		break;
2487b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2488b0c632dbSHeiko Carstens 		psw_t psw;
2489b0c632dbSHeiko Carstens 
2490bc923cc9SAvi Kivity 		r = -EFAULT;
2491b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2492bc923cc9SAvi Kivity 			break;
2493bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2494bc923cc9SAvi Kivity 		break;
2495b0c632dbSHeiko Carstens 	}
2496b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2497bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2498bc923cc9SAvi Kivity 		break;
249914eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
250014eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
250114eebd91SCarsten Otte 		struct kvm_one_reg reg;
250214eebd91SCarsten Otte 		r = -EFAULT;
250314eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
250414eebd91SCarsten Otte 			break;
250514eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
250614eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
250714eebd91SCarsten Otte 		else
250814eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
250914eebd91SCarsten Otte 		break;
251014eebd91SCarsten Otte 	}
251127e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
251227e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
251327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
251427e0393fSCarsten Otte 
251527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
251627e0393fSCarsten Otte 			r = -EFAULT;
251727e0393fSCarsten Otte 			break;
251827e0393fSCarsten Otte 		}
251927e0393fSCarsten Otte 
252027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
252127e0393fSCarsten Otte 			r = -EINVAL;
252227e0393fSCarsten Otte 			break;
252327e0393fSCarsten Otte 		}
252427e0393fSCarsten Otte 
252527e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
252627e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
252727e0393fSCarsten Otte 		break;
252827e0393fSCarsten Otte 	}
252927e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
253027e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
253127e0393fSCarsten Otte 
253227e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
253327e0393fSCarsten Otte 			r = -EFAULT;
253427e0393fSCarsten Otte 			break;
253527e0393fSCarsten Otte 		}
253627e0393fSCarsten Otte 
253727e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
253827e0393fSCarsten Otte 			r = -EINVAL;
253927e0393fSCarsten Otte 			break;
254027e0393fSCarsten Otte 		}
254127e0393fSCarsten Otte 
254227e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
254327e0393fSCarsten Otte 			ucasmap.length);
254427e0393fSCarsten Otte 		break;
254527e0393fSCarsten Otte 	}
254627e0393fSCarsten Otte #endif
2547ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2548527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2549ccc7910fSCarsten Otte 		break;
2550ccc7910fSCarsten Otte 	}
2551d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2552d6712df9SCornelia Huck 	{
2553d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2554d6712df9SCornelia Huck 		r = -EFAULT;
2555d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2556d6712df9SCornelia Huck 			break;
2557d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2558d6712df9SCornelia Huck 		break;
2559d6712df9SCornelia Huck 	}
256041408c28SThomas Huth 	case KVM_S390_MEM_OP: {
256141408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
256241408c28SThomas Huth 
256341408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
256441408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
256541408c28SThomas Huth 		else
256641408c28SThomas Huth 			r = -EFAULT;
256741408c28SThomas Huth 		break;
256841408c28SThomas Huth 	}
2569816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
2570816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2571816c7667SJens Freimann 
2572816c7667SJens Freimann 		r = -EFAULT;
2573816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2574816c7667SJens Freimann 			break;
2575816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2576816c7667SJens Freimann 		    irq_state.len == 0 ||
2577816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2578816c7667SJens Freimann 			r = -EINVAL;
2579816c7667SJens Freimann 			break;
2580816c7667SJens Freimann 		}
2581816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
2582816c7667SJens Freimann 					   (void __user *) irq_state.buf,
2583816c7667SJens Freimann 					   irq_state.len);
2584816c7667SJens Freimann 		break;
2585816c7667SJens Freimann 	}
2586816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
2587816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2588816c7667SJens Freimann 
2589816c7667SJens Freimann 		r = -EFAULT;
2590816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2591816c7667SJens Freimann 			break;
2592816c7667SJens Freimann 		if (irq_state.len == 0) {
2593816c7667SJens Freimann 			r = -EINVAL;
2594816c7667SJens Freimann 			break;
2595816c7667SJens Freimann 		}
2596816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
2597816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
2598816c7667SJens Freimann 					   irq_state.len);
2599816c7667SJens Freimann 		break;
2600816c7667SJens Freimann 	}
2601b0c632dbSHeiko Carstens 	default:
26023e6afcf1SCarsten Otte 		r = -ENOTTY;
2603b0c632dbSHeiko Carstens 	}
2604bc923cc9SAvi Kivity 	return r;
2605b0c632dbSHeiko Carstens }
2606b0c632dbSHeiko Carstens 
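/*
 * Fault handler for mmap() on the vcpu file descriptor: only ucontrol VMs
 * may map the SIE control block at KVM_S390_SIE_PAGE_OFFSET, everything
 * else gets SIGBUS.
 */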
26075b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
26085b1c1493SCarsten Otte {
26095b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
26105b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
26115b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
26125b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
26135b1c1493SCarsten Otte 		get_page(vmf->page);
26145b1c1493SCarsten Otte 		return 0;
26155b1c1493SCarsten Otte 	}
26165b1c1493SCarsten Otte #endif
26175b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
26185b1c1493SCarsten Otte }
26195b1c1493SCarsten Otte 
26205587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
26215587027cSAneesh Kumar K.V 			    unsigned long npages)
2622db3fe4ebSTakuya Yoshikawa {
2623db3fe4ebSTakuya Yoshikawa 	return 0;
2624db3fe4ebSTakuya Yoshikawa }
2625db3fe4ebSTakuya Yoshikawa 
2626b0c632dbSHeiko Carstens /* Section: memory related */
2627f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2628f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
262909170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
26307b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2631b0c632dbSHeiko Carstens {
2632dd2887e7SNick Wang 	/* A few sanity checks. The userspace address and the size of a memory
2633dd2887e7SNick Wang 	   slot have to be aligned to a segment boundary (1MB). The memory in
2634dd2887e7SNick Wang 	   userland may be fragmented into various different vmas. It is okay
2635dd2887e7SNick Wang 	   to mmap() and munmap() this slot at any time after this call. */
2636b0c632dbSHeiko Carstens 
2637598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2638b0c632dbSHeiko Carstens 		return -EINVAL;
2639b0c632dbSHeiko Carstens 
2640598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2641b0c632dbSHeiko Carstens 		return -EINVAL;
2642b0c632dbSHeiko Carstens 
2643f7784b8eSMarcelo Tosatti 	return 0;
2644f7784b8eSMarcelo Tosatti }
2645f7784b8eSMarcelo Tosatti 
2646f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
264709170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
26488482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
2649f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
26508482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2651f7784b8eSMarcelo Tosatti {
2652f7850c92SCarsten Otte 	int rc;
2653f7784b8eSMarcelo Tosatti 
26542cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
26552cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
26562cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
26572cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
26582cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
26592cef4debSChristian Borntraeger 	 */
26602cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
26612cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
26622cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
26632cef4debSChristian Borntraeger 		return;
2664598841caSCarsten Otte 
2665598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2666598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2667598841caSCarsten Otte 	if (rc)
2668ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
2669598841caSCarsten Otte 	return;
2670b0c632dbSHeiko Carstens }
2671b0c632dbSHeiko Carstens 
2672b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2673b0c632dbSHeiko Carstens {
26749d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2675b0c632dbSHeiko Carstens }
2676b0c632dbSHeiko Carstens 
2677b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2678b0c632dbSHeiko Carstens {
2679b0c632dbSHeiko Carstens 	kvm_exit();
2680b0c632dbSHeiko Carstens }
2681b0c632dbSHeiko Carstens 
2682b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2683b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2684566af940SCornelia Huck 
2685566af940SCornelia Huck /*
2686566af940SCornelia Huck  * Enable autoloading of the kvm module.
2687566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2688566af940SCornelia Huck  * since x86 takes a different approach.
2689566af940SCornelia Huck  */
2690566af940SCornelia Huck #include <linux/miscdevice.h>
2691566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2692566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2693