/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}
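	/*
	 * Async page faults (pfault) and channel subsystem passthrough
	 * (css_support) start out disabled for a new VM; css_support is
	 * switched on later via KVM_CAP_S390_CSS_SUPPORT in
	 * kvm_vcpu_ioctl_enable_cap() below.
	 */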
	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
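	/*
	 * The reset leaves the vcpu in the stopped state;
	 * kvm_arch_vcpu_ioctl_run() clears CPUSTAT_STOPPED again before
	 * entering the guest.
	 */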
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

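	/*
	 * Wire up this vcpu's local interrupt state and link it into the
	 * VM-wide floating interrupt structure, so floating interrupts can
	 * be delivered to this vcpu.
	 */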
	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
®); 113614eebd91SCarsten Otte else 113714eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®); 113814eebd91SCarsten Otte break; 113914eebd91SCarsten Otte } 114027e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 114127e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 114227e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 114327e0393fSCarsten Otte 114427e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 114527e0393fSCarsten Otte r = -EFAULT; 114627e0393fSCarsten Otte break; 114727e0393fSCarsten Otte } 114827e0393fSCarsten Otte 114927e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 115027e0393fSCarsten Otte r = -EINVAL; 115127e0393fSCarsten Otte break; 115227e0393fSCarsten Otte } 115327e0393fSCarsten Otte 115427e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 115527e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 115627e0393fSCarsten Otte break; 115727e0393fSCarsten Otte } 115827e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 115927e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 116027e0393fSCarsten Otte 116127e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 116227e0393fSCarsten Otte r = -EFAULT; 116327e0393fSCarsten Otte break; 116427e0393fSCarsten Otte } 116527e0393fSCarsten Otte 116627e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 116727e0393fSCarsten Otte r = -EINVAL; 116827e0393fSCarsten Otte break; 116927e0393fSCarsten Otte } 117027e0393fSCarsten Otte 117127e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 117227e0393fSCarsten Otte ucasmap.length); 117327e0393fSCarsten Otte break; 117427e0393fSCarsten Otte } 117527e0393fSCarsten Otte #endif 1176ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 1177ccc7910fSCarsten Otte r = gmap_fault(arg, vcpu->arch.gmap); 1178ccc7910fSCarsten Otte if (!IS_ERR_VALUE(r)) 1179ccc7910fSCarsten Otte r = 0; 1180ccc7910fSCarsten Otte break; 1181ccc7910fSCarsten Otte } 1182d6712df9SCornelia Huck case KVM_ENABLE_CAP: 1183d6712df9SCornelia Huck { 1184d6712df9SCornelia Huck struct kvm_enable_cap cap; 1185d6712df9SCornelia Huck r = -EFAULT; 1186d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 1187d6712df9SCornelia Huck break; 1188d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 1189d6712df9SCornelia Huck break; 1190d6712df9SCornelia Huck } 1191b0c632dbSHeiko Carstens default: 11923e6afcf1SCarsten Otte r = -ENOTTY; 1193b0c632dbSHeiko Carstens } 1194bc923cc9SAvi Kivity return r; 1195b0c632dbSHeiko Carstens } 1196b0c632dbSHeiko Carstens 11975b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 11985b1c1493SCarsten Otte { 11995b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 12005b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 12015b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 12025b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 12035b1c1493SCarsten Otte get_page(vmf->page); 12045b1c1493SCarsten Otte return 0; 12055b1c1493SCarsten Otte } 12065b1c1493SCarsten Otte #endif 12075b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 12085b1c1493SCarsten Otte } 12095b1c1493SCarsten Otte 12105587027cSAneesh Kumar K.V void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 1211db3fe4ebSTakuya Yoshikawa struct kvm_memory_slot *dont) 1212db3fe4ebSTakuya Yoshikawa { 1213db3fe4ebSTakuya Yoshikawa } 1214db3fe4ebSTakuya Yoshikawa 12155587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct 
kvm *kvm, struct kvm_memory_slot *slot, 12165587027cSAneesh Kumar K.V unsigned long npages) 1217db3fe4ebSTakuya Yoshikawa { 1218db3fe4ebSTakuya Yoshikawa return 0; 1219db3fe4ebSTakuya Yoshikawa } 1220db3fe4ebSTakuya Yoshikawa 1221e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm) 1222e59dbe09STakuya Yoshikawa { 1223e59dbe09STakuya Yoshikawa } 1224e59dbe09STakuya Yoshikawa 1225b0c632dbSHeiko Carstens /* Section: memory related */ 1226f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 1227f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 12287b6195a9STakuya Yoshikawa struct kvm_userspace_memory_region *mem, 12297b6195a9STakuya Yoshikawa enum kvm_mr_change change) 1230b0c632dbSHeiko Carstens { 1231dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 1232dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 1233dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 1234dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 1235b0c632dbSHeiko Carstens 1236598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 1237b0c632dbSHeiko Carstens return -EINVAL; 1238b0c632dbSHeiko Carstens 1239598841caSCarsten Otte if (mem->memory_size & 0xffffful) 1240b0c632dbSHeiko Carstens return -EINVAL; 1241b0c632dbSHeiko Carstens 1242f7784b8eSMarcelo Tosatti return 0; 1243f7784b8eSMarcelo Tosatti } 1244f7784b8eSMarcelo Tosatti 1245f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 1246f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 12478482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 12488482644aSTakuya Yoshikawa enum kvm_mr_change change) 1249f7784b8eSMarcelo Tosatti { 1250f7850c92SCarsten Otte int rc; 1251f7784b8eSMarcelo Tosatti 12522cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 12532cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 12542cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 12552cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 12562cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 
12572cef4debSChristian Borntraeger 	 */
12582cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
12592cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
12602cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
12612cef4debSChristian Borntraeger 		return;
1262598841caSCarsten Otte
1263598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1264598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1265598841caSCarsten Otte 	if (rc)
1266f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1267598841caSCarsten Otte 	return;
1268b0c632dbSHeiko Carstens }
1269b0c632dbSHeiko Carstens
12702df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm)
12712df72e9bSMarcelo Tosatti {
12722df72e9bSMarcelo Tosatti }
12732df72e9bSMarcelo Tosatti
12742df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
12752df72e9bSMarcelo Tosatti 				   struct kvm_memory_slot *slot)
127634d4cb8fSMarcelo Tosatti {
127734d4cb8fSMarcelo Tosatti }
127834d4cb8fSMarcelo Tosatti
1279b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1280b0c632dbSHeiko Carstens {
1281ef50f7acSChristian Borntraeger 	int ret;
12820ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1283ef50f7acSChristian Borntraeger 	if (ret)
1284ef50f7acSChristian Borntraeger 		return ret;
1285ef50f7acSChristian Borntraeger
1286ef50f7acSChristian Borntraeger 	/*
1287ef50f7acSChristian Borntraeger 	 * Guests can ask for up to 255+1 double words, so we need a full
128825985edcSLucas De Marchi 	 * page to hold the maximum amount of facilities. On the other hand,
1289ef50f7acSChristian Borntraeger 	 * we only set facilities that are known to work in KVM.
1290ef50f7acSChristian Borntraeger 	 */
129178c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
129278c4b59fSMichael Mueller 	if (!vfacilities) {
1293ef50f7acSChristian Borntraeger 		kvm_exit();
1294ef50f7acSChristian Borntraeger 		return -ENOMEM;
1295ef50f7acSChristian Borntraeger 	}
129678c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1297d208c79dSThomas Huth 	vfacilities[0] &= 0xff82fff3f4fc2000UL;
12987feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1299ef50f7acSChristian Borntraeger 	return 0;
1300b0c632dbSHeiko Carstens }
1301b0c632dbSHeiko Carstens
1302b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1303b0c632dbSHeiko Carstens {
130478c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1305b0c632dbSHeiko Carstens 	kvm_exit();
1306b0c632dbSHeiko Carstens }
1307b0c632dbSHeiko Carstens
1308b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1309b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1310566af940SCornelia Huck
1311566af940SCornelia Huck /*
1312566af940SCornelia Huck  * Enable autoloading of the kvm module.
1313566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1314566af940SCornelia Huck  * since x86 takes a different approach.
1315566af940SCornelia Huck  */
1316566af940SCornelia Huck #include <linux/miscdevice.h>
1317566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1318566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1319