
1 // SPDX-License-Identifier: GPL-2.0
13 #define KMSG_COMPONENT "kvm-s390"
36 #include <asm/access-regs.h>
37 #include <asm/asm-offsets.h>
50 #include "kvm-s390.h"
57 #include "trace-s390.h"
224 * the feature is opt-in anyway
232 * kernel handles and stores in the prefix page. If we ever need to go beyond
239 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
280 * -delta to the epoch. in kvm_clock_sync_scb()
282 delta = -delta; in kvm_clock_sync_scb()
284 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
286 delta_idx = -1; in kvm_clock_sync_scb()
288 scb->epoch += delta; in kvm_clock_sync_scb()
289 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
290 scb->epdx += delta_idx; in kvm_clock_sync_scb()
291 if (scb->epoch < delta) in kvm_clock_sync_scb()
292 scb->epdx += 1; in kvm_clock_sync_scb()
312 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
314 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
315 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
317 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
318 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
319 if (vcpu->arch.vsie_block) in kvm_clock_sync()
320 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
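
The epoch handling in kvm_clock_sync_scb() above is a 128-bit signed addition split across two 64-bit fields (epoch as the low half, epdx as the high half): the delta is sign-extended into the high word and an explicit carry is added when the low half wraps. A minimal standalone sketch of that arithmetic, with hypothetical names and none of the SIE control-block layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the (epdx:epoch) pair kept in the control block. */
struct epoch128 {
        uint64_t lo;        /* epoch: low 64 bits  */
        uint64_t hi;        /* epdx:  high 64 bits */
};

/*
 * Add a signed 64-bit delta to a 128-bit value, mirroring the
 * sign-extension plus carry handling in kvm_clock_sync_scb() above.
 */
static void epoch128_add(struct epoch128 *e, int64_t delta)
{
        /* Sign-extend delta into the high word: all zeros or all ones. */
        uint64_t delta_hi = delta < 0 ? ~0ULL : 0;
        uint64_t d = (uint64_t)delta;

        e->lo += d;
        e->hi += delta_hi;
        if (e->lo < d)        /* unsigned wrap => carry into the high word */
                e->hi += 1;
}

int main(void)
{
        struct epoch128 e = { .lo = ~0ULL - 1, .hi = 0 };

        epoch128_add(&e, 5);        /* crosses the 64-bit boundary */
        printf("hi=%llu lo=%llu\n",
               (unsigned long long)e.hi, (unsigned long long)e.lo);
        return 0;
}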
395 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
494 int rc = -ENOMEM; in __kvm_s390_init()
496 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
498 return -ENOMEM; in __kvm_s390_init()
500 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
570 return -EINVAL; in kvm_arch_dev_ioctl()
698 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
702 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
703 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
738 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
740 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
742 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
743 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
753 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
757 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
775 if (cap->flags) in kvm_vm_ioctl_enable_cap()
776 return -EINVAL; in kvm_vm_ioctl_enable_cap()
778 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
781 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
786 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
790 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
791 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
792 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
794 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
795 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
797 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
798 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
801 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
802 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
805 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
806 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
809 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
810 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
813 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
814 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
817 set_kvm_facility(kvm->arch.model.fac_mask, 198); in kvm_vm_ioctl_enable_cap()
818 set_kvm_facility(kvm->arch.model.fac_list, 198); in kvm_vm_ioctl_enable_cap()
821 set_kvm_facility(kvm->arch.model.fac_mask, 199); in kvm_vm_ioctl_enable_cap()
822 set_kvm_facility(kvm->arch.model.fac_list, 199); in kvm_vm_ioctl_enable_cap()
826 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
827 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
832 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
833 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
834 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
835 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
837 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
838 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
841 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
846 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
847 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
848 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
850 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
851 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
854 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
859 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
860 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
861 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
862 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
864 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
865 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
868 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
873 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
874 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
875 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
876 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
877 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
880 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
881 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
882 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
888 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
889 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
891 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
897 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
902 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
907 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
908 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
909 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
910 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
912 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
913 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
916 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
921 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
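
Nearly every capability case above follows the same shape: take kvm->lock, bail out with -EBUSY once vCPUs exist (they have already copied the CPU model), otherwise set the same facility number in both the facility mask and the facility list. A hedged sketch of that shape; struct vm_model and vm_enable_facility are illustrative names, and the real kernel stores facility bits in a different (MSB-first per byte) order via set_kvm_facility():

#include <errno.h>
#include <pthread.h>

/* Illustrative stand-in for the per-VM CPU model state. */
struct vm_model {
        pthread_mutex_t lock;
        int created_vcpus;
        unsigned long long fac_mask[4];        /* facilities the host offers     */
        unsigned long long fac_list[4];        /* facilities the guest will see  */
};

/* Enable one facility number, allowed only before any vCPU exists. */
static int vm_enable_facility(struct vm_model *vm, unsigned int nr)
{
        int r = 0;

        if (nr >= 64 * 4)
                return -EINVAL;

        pthread_mutex_lock(&vm->lock);
        if (vm->created_vcpus) {
                /* Too late: created vCPUs already copied the model. */
                r = -EBUSY;
        } else {
                vm->fac_mask[nr / 64] |= 1ULL << (nr % 64);
                vm->fac_list[nr / 64] |= 1ULL << (nr % 64);
        }
        pthread_mutex_unlock(&vm->lock);
        return r;
}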
931 switch (attr->attr) { in kvm_s390_get_mem_control()
935 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
936 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
937 ret = -EFAULT; in kvm_s390_get_mem_control()
940 ret = -ENXIO; in kvm_s390_get_mem_control()
950 switch (attr->attr) { in kvm_s390_set_mem_control()
952 ret = -ENXIO; in kvm_s390_set_mem_control()
957 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
958 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
959 ret = -EBUSY; in kvm_s390_set_mem_control()
960 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
961 ret = -EINVAL; in kvm_s390_set_mem_control()
963 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
965 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
968 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
971 ret = -ENXIO; in kvm_s390_set_mem_control()
974 ret = -EINVAL; in kvm_s390_set_mem_control()
975 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
979 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
980 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
981 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
982 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
983 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
990 return -EINVAL; in kvm_s390_set_mem_control()
992 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
993 return -EFAULT; in kvm_s390_set_mem_control()
995 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
996 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
997 return -E2BIG; in kvm_s390_set_mem_control()
1000 return -EINVAL; in kvm_s390_set_mem_control()
1004 new_limit -= 1; in kvm_s390_set_mem_control()
1006 ret = -EBUSY; in kvm_s390_set_mem_control()
1007 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
1008 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
1010 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
1013 ret = -ENOMEM; in kvm_s390_set_mem_control()
1015 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
1016 new->private = kvm; in kvm_s390_set_mem_control()
1017 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
1021 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
1024 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
1028 ret = -ENXIO; in kvm_s390_set_mem_control()
1054 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1055 switch (attr->attr) { in kvm_s390_vm_set_crypto()
1058 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1059 return -EINVAL; in kvm_s390_vm_set_crypto()
1062 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1063 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1064 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1069 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1070 return -EINVAL; in kvm_s390_vm_set_crypto()
1073 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1074 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1075 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1080 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1081 return -EINVAL; in kvm_s390_vm_set_crypto()
1083 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1084 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1085 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1090 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1091 return -EINVAL; in kvm_s390_vm_set_crypto()
1093 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1094 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1095 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1100 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1101 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1103 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1107 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1108 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1110 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1113 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1114 return -ENXIO; in kvm_s390_vm_set_crypto()
1118 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
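
The AES/DEA key-wrapping cases above are symmetric: enabling wrapping generates a fresh random wrapping-key mask in the CRYCB and sets the flag, disabling zeroes the mask and clears it. A small userspace-flavoured sketch of that toggle, with getrandom() standing in for the kernel's get_random_bytes() and an illustrative mask size:

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/random.h>

/* Illustrative wrapping-key state; the real masks live in the CRYCB. */
struct wrap_state {
        int enabled;
        uint8_t mask[24];        /* size chosen for illustration only */
};

/* Enable: fresh random mask.  Disable: zero the mask and clear the flag. */
static int wrap_set(struct wrap_state *w, int enable)
{
        if (enable) {
                if (getrandom(w->mask, sizeof(w->mask), 0) != (ssize_t)sizeof(w->mask))
                        return -1;
                w->enabled = 1;
        } else {
                memset(w->mask, 0, sizeof(w->mask));
                w->enabled = 0;
        }
        return 0;
}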
1125 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1128 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI; in kvm_s390_vcpu_pci_setup()
1129 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI; in kvm_s390_vcpu_pci_setup()
1137 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1146 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1168 * Must be called with kvm->srcu held to avoid races on memslots, and with
1169 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1179 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1183 return -EINVAL; in kvm_s390_vm_start_migration()
1185 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1186 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1191 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1192 return -EINVAL; in kvm_s390_vm_start_migration()
1200 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1202 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1203 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1209 * Must be called with kvm->slots_lock to avoid races with ourselves and
1215 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1217 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1218 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1226 int res = -ENXIO; in kvm_s390_vm_set_migration()
1228 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1229 switch (attr->attr) { in kvm_s390_vm_set_migration()
1239 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1247 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1249 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1250 return -ENXIO; in kvm_s390_vm_get_migration()
1252 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1253 return -EFAULT; in kvm_s390_vm_get_migration()
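
The migration-status getter above is the simplest instance of the device-attribute convention used throughout this file: attr->group and attr->attr select the property, and attr->addr is a user pointer the value is copied to or from. A sketch of how userspace might query it through the VM fd, assuming the usual struct kvm_device_attr and the KVM_S390_VM_MIGRATION* constants from an s390 build of <linux/kvm.h>:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the VM's migration-mode flag (0 or 1) through the attribute API. */
static int get_migration_mode(int vm_fd, uint64_t *mode)
{
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_MIGRATION,
                .attr  = KVM_S390_VM_MIGRATION_STATUS,
                .addr  = (uint64_t)(uintptr_t)mode,        /* kernel copies the u64 here */
        };

        return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}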
1263 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1264 return -EFAULT; in kvm_s390_set_tod_ext()
1267 return -EINVAL; in kvm_s390_set_tod_ext()
1280 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1282 return -EFAULT; in kvm_s390_set_tod_high()
1285 return -EINVAL; in kvm_s390_set_tod_high()
1295 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1297 return -EFAULT; in kvm_s390_set_tod_low()
1308 if (attr->flags) in kvm_s390_set_tod()
1309 return -EINVAL; in kvm_s390_set_tod()
1311 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1317 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1321 switch (attr->attr) { in kvm_s390_set_tod()
1332 ret = -ENXIO; in kvm_s390_set_tod()
1337 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1350 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1351 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1353 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1354 if (gtod->tod < clk.tod) in kvm_s390_get_tod_clock()
1355 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1367 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1368 return -EFAULT; in kvm_s390_get_tod_ext()
1379 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1381 return -EFAULT; in kvm_s390_get_tod_high()
1392 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1393 return -EFAULT; in kvm_s390_get_tod_low()
1403 if (attr->flags) in kvm_s390_get_tod()
1404 return -EINVAL; in kvm_s390_get_tod()
1406 switch (attr->attr) { in kvm_s390_get_tod()
1417 ret = -ENXIO; in kvm_s390_get_tod()
1429 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1430 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1431 ret = -EBUSY; in kvm_s390_set_processor()
1436 ret = -ENOMEM; in kvm_s390_set_processor()
1439 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1441 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1444 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1445 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1446 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1447 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1448 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1450 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1452 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1455 kvm->arch.model.ibc, in kvm_s390_set_processor()
1456 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1458 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1459 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1460 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1462 ret = -EFAULT; in kvm_s390_set_processor()
1465 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1474 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1475 return -EFAULT; in kvm_s390_set_processor_feat()
1479 return -EINVAL; in kvm_s390_set_processor_feat()
1481 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1482 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1483 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1484 return -EBUSY; in kvm_s390_set_processor_feat()
1486 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1487 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1498 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1499 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1500 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1501 return -EBUSY; in kvm_s390_set_processor_subfunc()
1504 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1506 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1507 return -EFAULT; in kvm_s390_set_processor_subfunc()
1509 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1512 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1513 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1514 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1521 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1527 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1530 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1536 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1539 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1542 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1545 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1548 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1551 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1554 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1557 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1560 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1566 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1586 struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; in kvm_s390_set_uv_feat()
1590 if (get_user(data, &ptr->feat)) in kvm_s390_set_uv_feat()
1591 return -EFAULT; in kvm_s390_set_uv_feat()
1593 return -EINVAL; in kvm_s390_set_uv_feat()
1595 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1596 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1597 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1598 return -EBUSY; in kvm_s390_set_uv_feat()
1600 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1601 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1603 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1610 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1612 switch (attr->attr) { in kvm_s390_set_cpu_model()
1636 ret = -ENOMEM; in kvm_s390_get_processor()
1639 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1640 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1641 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1644 kvm->arch.model.ibc, in kvm_s390_get_processor()
1645 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1647 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1648 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1649 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1650 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1651 ret = -EFAULT; in kvm_s390_get_processor()
1664 ret = -ENOMEM; in kvm_s390_get_machine()
1667 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1668 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1669 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1671 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list, in kvm_s390_get_machine()
1674 kvm->arch.model.ibc, in kvm_s390_get_machine()
1675 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1677 mach->fac_mask[0], in kvm_s390_get_machine()
1678 mach->fac_mask[1], in kvm_s390_get_machine()
1679 mach->fac_mask[2]); in kvm_s390_get_machine()
1681 mach->fac_list[0], in kvm_s390_get_machine()
1682 mach->fac_list[1], in kvm_s390_get_machine()
1683 mach->fac_list[2]); in kvm_s390_get_machine()
1684 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1685 ret = -EFAULT; in kvm_s390_get_machine()
1696 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1697 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1698 return -EFAULT; in kvm_s390_get_processor_feat()
1712 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1713 return -EFAULT; in kvm_s390_get_machine_feat()
1724 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1726 return -EFAULT; in kvm_s390_get_processor_subfunc()
1729 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1730 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1731 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1735 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1738 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1741 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1744 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1746 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1747 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1750 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1752 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1753 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1755 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1756 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1758 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1759 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1761 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1762 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1764 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1765 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1767 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1768 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1770 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1771 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1773 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1774 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1776 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1777 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1778 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1779 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1781 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1782 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1783 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1784 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1795 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1797 return -EFAULT; in kvm_s390_get_machine_subfunc()
1865 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_processor_uv_feat()
1866 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1868 if (put_user(feat, &dst->feat)) in kvm_s390_get_processor_uv_feat()
1869 return -EFAULT; in kvm_s390_get_processor_uv_feat()
1870 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1877 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_machine_uv_feat()
1883 if (put_user(feat, &dst->feat)) in kvm_s390_get_machine_uv_feat()
1884 return -EFAULT; in kvm_s390_get_machine_uv_feat()
1885 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1892 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1894 switch (attr->attr) { in kvm_s390_get_cpu_model()
1924 * kvm_s390_update_topology_change_report - update CPU topology change report
1928 * Updates the Multiprocessor Topology-Change-Report bit to signal
1939 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1940 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1941 old = READ_ONCE(sca->utility); in kvm_s390_update_topology_change_report()
1945 } while (!try_cmpxchg(&sca->utility.val, &old.val, new.val)); in kvm_s390_update_topology_change_report()
1946 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1953 return -ENXIO; in kvm_s390_set_topo_change_indication()
1955 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1965 return -ENXIO; in kvm_s390_get_topo_change_indication()
1967 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1968 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1969 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1971 return put_user(topo, (u8 __user *)attr->addr); in kvm_s390_get_topo_change_indication()
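
The MTCR update above is a lock-free read-modify-write retry loop: read the utility word, flip the topology-change-report bit in a local copy, and publish it with a compare-and-swap, retrying if another CPU raced. A minimal sketch of the same pattern with C11 atomics (the bit position is illustrative, and this is not the kernel's try_cmpxchg()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MTCR_BIT (1u << 15)        /* illustrative bit position */

/* Set or clear one flag bit in a shared word without taking a lock. */
static void update_mtcr(_Atomic uint16_t *utility, bool val)
{
        uint16_t old = atomic_load(utility);
        uint16_t new;

        do {
                new = val ? (old | MTCR_BIT) : (old & ~MTCR_BIT);
                /* On failure, 'old' is refreshed with the current value. */
        } while (!atomic_compare_exchange_weak(utility, &old, new));
}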
1978 switch (attr->group) { in kvm_s390_vm_set_attr()
1998 ret = -ENXIO; in kvm_s390_vm_set_attr()
2009 switch (attr->group) { in kvm_s390_vm_get_attr()
2026 ret = -ENXIO; in kvm_s390_vm_get_attr()
2037 switch (attr->group) { in kvm_s390_vm_has_attr()
2039 switch (attr->attr) { in kvm_s390_vm_has_attr()
2042 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2048 ret = -ENXIO; in kvm_s390_vm_has_attr()
2053 switch (attr->attr) { in kvm_s390_vm_has_attr()
2059 ret = -ENXIO; in kvm_s390_vm_has_attr()
2064 switch (attr->attr) { in kvm_s390_vm_has_attr()
2076 ret = -ENXIO; in kvm_s390_vm_has_attr()
2081 switch (attr->attr) { in kvm_s390_vm_has_attr()
2090 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2093 ret = -ENXIO; in kvm_s390_vm_has_attr()
2101 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2104 ret = -ENXIO; in kvm_s390_vm_has_attr()
2117 if (args->flags != 0) in kvm_s390_get_skeys()
2118 return -EINVAL; in kvm_s390_get_skeys()
2121 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
2125 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
2126 return -EINVAL; in kvm_s390_get_skeys()
2128 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_get_skeys()
2130 return -ENOMEM; in kvm_s390_get_skeys()
2132 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
2133 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2134 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
2135 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2137 r = -EFAULT; in kvm_s390_get_skeys()
2141 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
2145 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2146 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
2149 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
2150 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
2152 r = -EFAULT; in kvm_s390_get_skeys()
2166 if (args->flags != 0) in kvm_s390_set_skeys()
2167 return -EINVAL; in kvm_s390_set_skeys()
2170 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
2171 return -EINVAL; in kvm_s390_set_skeys()
2173 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_set_skeys()
2175 return -ENOMEM; in kvm_s390_set_skeys()
2177 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
2178 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
2180 r = -EFAULT; in kvm_s390_set_skeys()
2190 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
2191 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2192 while (i < args->count) { in kvm_s390_set_skeys()
2194 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2196 r = -EFAULT; in kvm_s390_set_skeys()
2202 r = -EINVAL; in kvm_s390_set_skeys()
2206 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
2208 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
2216 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2217 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
2235 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
2237 args->count = 0; in kvm_s390_peek_cmma()
2238 while (args->count < bufsize) { in kvm_s390_peek_cmma()
2245 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
2246 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2248 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
2265 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2266 struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; in kvm_s390_next_dirty_cmma()
2268 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2272 mnode = rb_first(&slots->gfn_tree); in kvm_s390_next_dirty_cmma()
2274 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2278 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2281 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2282 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { in kvm_s390_next_dirty_cmma()
2283 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2284 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); in kvm_s390_next_dirty_cmma()
2286 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
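
kvm_s390_next_dirty_cmma() above walks the gfn-ordered memslot tree and, within each slot, the CMMA dirty bitmap, returning the next dirty guest frame number at or after cur_gfn. A simplified sketch of that search over a flat, sorted array of slots; struct slot and next_dirty_gfn are hypothetical, and the kernel uses an rb-tree plus find_next_bit() instead of the plain loops here:

#include <stddef.h>
#include <stdint.h>

/* Simplified memslot: assumed sorted by base_gfn, one dirty bit per page. */
struct slot {
        uint64_t base_gfn;
        uint64_t npages;
        const unsigned char *dirty;        /* npages bits, LSB-first per byte */
};

static int test_bit_at(const unsigned char *map, uint64_t nr)
{
        return (map[nr / 8] >> (nr % 8)) & 1;
}

/* Return the next dirty gfn >= cur_gfn, or UINT64_MAX if none is left. */
static uint64_t next_dirty_gfn(const struct slot *slots, size_t n, uint64_t cur_gfn)
{
        for (size_t i = 0; i < n; i++) {
                const struct slot *ms = &slots[i];
                uint64_t ofs = 0;

                if (ms->base_gfn + ms->npages <= cur_gfn)
                        continue;                /* slot lies entirely below cur_gfn */
                if (cur_gfn > ms->base_gfn)
                        ofs = cur_gfn - ms->base_gfn;
                for (; ofs < ms->npages; ofs++)
                        if (test_bit_at(ms->dirty, ofs))
                                return ms->base_gfn + ofs;
        }
        return UINT64_MAX;
}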
2299 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2301 args->count = 0; in kvm_s390_get_cmma()
2302 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2308 while (args->count < bufsize) { in kvm_s390_get_cmma()
2313 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2314 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2315 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2318 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2327 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2331 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2355 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2356 return -ENXIO; in kvm_s390_get_cmma_bits()
2358 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2359 return -EINVAL; in kvm_s390_get_cmma_bits()
2361 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2362 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2363 return -EINVAL; in kvm_s390_get_cmma_bits()
2365 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2366 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2371 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2378 return -ENOMEM; in kvm_s390_get_cmma_bits()
2380 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2381 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2386 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2387 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2389 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2390 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2392 args->remaining = 0; in kvm_s390_get_cmma_bits()
2394 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2395 ret = -EFAULT; in kvm_s390_get_cmma_bits()
2404 * set and the mm->context.uses_cmm flag is set.
2413 mask = args->mask; in kvm_s390_set_cmma_bits()
2415 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2416 return -ENXIO; in kvm_s390_set_cmma_bits()
2418 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2419 return -EINVAL; in kvm_s390_set_cmma_bits()
2421 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2422 return -EINVAL; in kvm_s390_set_cmma_bits()
2424 if (args->count == 0) in kvm_s390_set_cmma_bits()
2427 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2429 return -ENOMEM; in kvm_s390_set_cmma_bits()
2431 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2433 r = -EFAULT; in kvm_s390_set_cmma_bits()
2437 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2438 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2439 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2440 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2442 r = -EFAULT; in kvm_s390_set_cmma_bits()
2449 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2451 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2452 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2454 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2455 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2456 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2457 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2465 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2475 * Return: 0 in case of success, otherwise -EIO
2493 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2497 ret = -EIO; in kvm_s390_cpus_from_pv()
2499 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2501 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */ in kvm_s390_cpus_from_pv()
2508 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2516 * Return: 0 in case of success, otherwise -EIO
2531 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2533 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2554 switch (info->header.id) { in kvm_s390_handle_pv_info()
2556 len_min = sizeof(info->header) + sizeof(info->vm); in kvm_s390_handle_pv_info()
2558 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2559 return -EINVAL; in kvm_s390_handle_pv_info()
2561 memcpy(info->vm.inst_calls_list, in kvm_s390_handle_pv_info()
2566 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; in kvm_s390_handle_pv_info()
2567 info->vm.max_guests = uv_info.max_num_sec_conf; in kvm_s390_handle_pv_info()
2568 info->vm.max_guest_addr = uv_info.max_sec_stor_addr; in kvm_s390_handle_pv_info()
2569 info->vm.feature_indication = uv_info.uv_feature_indications; in kvm_s390_handle_pv_info()
2574 len_min = sizeof(info->header) + sizeof(info->dump); in kvm_s390_handle_pv_info()
2576 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2577 return -EINVAL; in kvm_s390_handle_pv_info()
2579 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; in kvm_s390_handle_pv_info()
2580 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; in kvm_s390_handle_pv_info()
2581 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; in kvm_s390_handle_pv_info()
2585 return -EINVAL; in kvm_s390_handle_pv_info()
2592 int r = -EINVAL; in kvm_s390_pv_dmp()
2597 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2607 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2609 cmd->rc, cmd->rrc); in kvm_s390_pv_dmp()
2611 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2614 r = -EINVAL; in kvm_s390_pv_dmp()
2619 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2628 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2632 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2635 r = -EINVAL; in kvm_s390_pv_dmp()
2640 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2644 r = -ENOTTY; in kvm_s390_pv_dmp()
2653 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); in kvm_s390_handle_pv()
2654 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2659 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2661 switch (cmd->cmd) { in kvm_s390_handle_pv()
2663 r = -EINVAL; in kvm_s390_handle_pv()
2679 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2683 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2688 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2692 r = -EINVAL; in kvm_s390_handle_pv()
2696 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2704 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2707 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2710 r = -EINVAL; in kvm_s390_handle_pv()
2713 /* kvm->lock must not be held; this is asserted inside the function. */ in kvm_s390_handle_pv()
2714 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2717 r = -EINVAL; in kvm_s390_handle_pv()
2721 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2729 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2732 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2739 r = -EINVAL; in kvm_s390_handle_pv()
2743 r = -EFAULT; in kvm_s390_handle_pv()
2748 r = -EINVAL; in kvm_s390_handle_pv()
2752 r = -ENOMEM; in kvm_s390_handle_pv()
2757 r = -EFAULT; in kvm_s390_handle_pv()
2761 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2769 r = -EINVAL; in kvm_s390_handle_pv()
2770 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2773 r = -EFAULT; in kvm_s390_handle_pv()
2778 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2782 r = -EINVAL; in kvm_s390_handle_pv()
2787 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2788 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2789 cmd->rrc); in kvm_s390_handle_pv()
2793 r = -EINVAL; in kvm_s390_handle_pv()
2798 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2800 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2804 r = -EINVAL; in kvm_s390_handle_pv()
2809 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2811 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2827 r = -EFAULT; in kvm_s390_handle_pv()
2831 r = -EINVAL; in kvm_s390_handle_pv()
2847 r = -EFAULT; in kvm_s390_handle_pv()
2857 r = -EINVAL; in kvm_s390_handle_pv()
2861 r = -EFAULT; in kvm_s390_handle_pv()
2870 r = -EFAULT; in kvm_s390_handle_pv()
2877 r = -ENOTTY; in kvm_s390_handle_pv()
2880 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
2887 if (mop->flags & ~supported_flags || !mop->size) in mem_op_validate_common()
2888 return -EINVAL; in mem_op_validate_common()
2889 if (mop->size > MEM_OP_MAX_SIZE) in mem_op_validate_common()
2890 return -E2BIG; in mem_op_validate_common()
2891 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { in mem_op_validate_common()
2892 if (mop->key > 0xf) in mem_op_validate_common()
2893 return -EINVAL; in mem_op_validate_common()
2895 mop->key = 0; in mem_op_validate_common()
2902 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_abs()
2912 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vm_mem_op_abs()
2913 tmpbuf = vmalloc(mop->size); in kvm_s390_vm_mem_op_abs()
2915 return -ENOMEM; in kvm_s390_vm_mem_op_abs()
2918 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2920 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2925 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vm_mem_op_abs()
2926 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vm_mem_op_abs()
2927 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2931 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2932 mop->size, GACC_FETCH, mop->key); in kvm_s390_vm_mem_op_abs()
2935 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_vm_mem_op_abs()
2936 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2938 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vm_mem_op_abs()
2939 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2942 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2943 mop->size, GACC_STORE, mop->key); in kvm_s390_vm_mem_op_abs()
2947 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2955 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_cmpxchg()
2956 void __user *old_addr = (void __user *)mop->old_addr; in kvm_s390_vm_mem_op_cmpxchg()
2961 unsigned int off_in_quad = sizeof(new) - mop->size; in kvm_s390_vm_mem_op_cmpxchg()
2973 if (mop->size > sizeof(new)) in kvm_s390_vm_mem_op_cmpxchg()
2974 return -EINVAL; in kvm_s390_vm_mem_op_cmpxchg()
2975 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2976 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2977 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2978 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2980 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2982 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2987 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2988 new.quad, mop->key, &success); in kvm_s390_vm_mem_op_cmpxchg()
2989 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2990 r = -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2993 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
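
The cmpxchg memory op above right-aligns the user's 1-, 2-, 4- or 8-byte operand inside a 16-byte quadword: off_in_quad = sizeof(new) - mop->size places it in the last bytes of the buffer, which on big-endian s390 are the low-order bytes of the 128-bit value. A tiny sketch of that placement; the union here is only an illustration (the kernel pairs the byte array with a 128-bit integer member):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-byte buffer holding a right-aligned operand. */
union quad {
        unsigned char raw[16];
};

/* Copy a 'size'-byte value so it occupies the last 'size' bytes of the quad. */
static void place_operand(union quad *q, const void *val, size_t size)
{
        size_t off_in_quad = sizeof(q->raw) - size;

        memset(q->raw, 0, sizeof(q->raw));
        memcpy(q->raw + off_in_quad, val, size);
}

int main(void)
{
        union quad q;
        uint32_t v = 0x11223344;

        place_operand(&q, &v, sizeof(v));        /* lands in raw[12..15] */
        printf("first byte %02x, last byte %02x\n", q.raw[0], q.raw[15]);
        return 0;
}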
3000 * This is technically a heuristic only, if the kvm->lock is not in kvm_s390_vm_mem_op()
3001 * taken, it is not guaranteed that the vm is/remains non-protected. in kvm_s390_vm_mem_op()
3003 * on the access, -EFAULT is returned and the vm may crash the in kvm_s390_vm_mem_op()
3009 return -EINVAL; in kvm_s390_vm_mem_op()
3011 switch (mop->op) { in kvm_s390_vm_mem_op()
3018 return -EINVAL; in kvm_s390_vm_mem_op()
3024 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
3033 r = -EFAULT; in kvm_arch_vm_ioctl()
3040 r = -EINVAL; in kvm_arch_vm_ioctl()
3041 if (kvm->arch.use_irqchip) in kvm_arch_vm_ioctl()
3046 r = -EFAULT; in kvm_arch_vm_ioctl()
3053 r = -EFAULT; in kvm_arch_vm_ioctl()
3060 r = -EFAULT; in kvm_arch_vm_ioctl()
3069 r = -EFAULT; in kvm_arch_vm_ioctl()
3079 r = -EFAULT; in kvm_arch_vm_ioctl()
3089 r = -EFAULT; in kvm_arch_vm_ioctl()
3092 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3094 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3098 r = -EFAULT; in kvm_arch_vm_ioctl()
3105 r = -EFAULT; in kvm_arch_vm_ioctl()
3108 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3110 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3120 r = -EINVAL; in kvm_arch_vm_ioctl()
3124 r = -EFAULT; in kvm_arch_vm_ioctl()
3128 r = -EINVAL; in kvm_arch_vm_ioctl()
3131 /* must be called without kvm->lock */ in kvm_arch_vm_ioctl()
3134 r = -EFAULT; in kvm_arch_vm_ioctl()
3145 r = -EFAULT; in kvm_arch_vm_ioctl()
3151 r = -EINVAL; in kvm_arch_vm_ioctl()
3155 r = -EFAULT; in kvm_arch_vm_ioctl()
3162 r = -ENOTTY; in kvm_arch_vm_ioctl()
3190 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb); in kvm_s390_set_crycb_format()
3192 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
3193 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3200 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3202 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3217 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3223 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3227 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3229 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
3232 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
3235 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
3241 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
3242 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
3243 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
3267 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3274 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3275 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3276 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3277 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3297 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3299 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3305 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3306 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3307 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3308 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3309 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3310 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3315 if (kvm->arch.use_esca) in sca_dispose()
3316 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3318 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3319 kvm->arch.sca = NULL; in sca_dispose()
3337 rc = -EINVAL; in kvm_arch_init_vm()
3352 rc = -ENOMEM; in kvm_arch_init_vm()
3356 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3358 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3359 if (!kvm->arch.sca) in kvm_arch_init_vm()
3365 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3366 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3369 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
3371 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3372 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3376 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3378 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3381 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3382 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3385 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3388 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3391 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3393 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
3394 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3395 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3397 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3398 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3400 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3401 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3405 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3407 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3408 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3410 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3415 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3418 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3421 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3422 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3424 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3425 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3426 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3428 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3440 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3441 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3442 /* one flat fake memslot covering the whole address-space */ in kvm_arch_init_vm()
3443 mutex_lock(&kvm->slots_lock); in kvm_arch_init_vm()
3445 mutex_unlock(&kvm->slots_lock); in kvm_arch_init_vm()
3448 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3450 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3452 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3453 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3455 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3456 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3459 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3460 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3461 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3465 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3466 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3467 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3471 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3472 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3483 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
3486 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3488 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3490 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3491 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
3493 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3498 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
3509 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
3520 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3521 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3523 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3524 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3526 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3536 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
3537 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
3538 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
3539 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3548 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3549 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3550 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3552 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
3553 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3555 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3557 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
3558 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3560 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3566 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3569 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3570 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3573 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3574 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3575 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3578 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3579 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3580 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; in sca_add_vcpu()
3581 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
3582 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
3584 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3587 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3588 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3589 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3590 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
3592 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
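/*
 * Illustrative sketch only (not part of this file): how the 64-bit physical
 * address of the SCA is split across the two sie_block fields, mirroring
 * sca_add_vcpu() above. The ESCA path additionally masks the low word with
 * ESCA_SCAOL_MASK; the mask value and sample address below are made up for
 * this sketch, and all names are local to the example.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SCAOL_MASK	(~0x3fU)	/* assumed mask, stand-in for ESCA_SCAOL_MASK */

int main(void)
{
	uint64_t sca_phys = 0x000000012345f000ULL;	/* assumed SCA physical address */
	uint32_t scaoh = sca_phys >> 32;		/* origin, high word */
	uint32_t scaol = (uint32_t)sca_phys & EXAMPLE_SCAOL_MASK;	/* origin, low word */

	printf("scaoh=0x%08x scaol=0x%08x\n", scaoh, scaol);
	return 0;
}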
3598 d->sda = s->sda; in sca_copy_entry()
3599 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
3600 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
3607 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
3608 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
3610 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
3615 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3622 if (kvm->arch.use_esca) in sca_switch_to_extended()
3627 return -ENOMEM; in sca_switch_to_extended()
3634 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3639 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
3640 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
3641 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
3643 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3644 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3646 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3651 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
3652 old_sca, kvm->arch.sca); in sca_switch_to_extended()
3670 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
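/*
 * The switch above: sca_copy_b_to_e() carries the ipte control word, the MCN
 * bitmap and each per-CPU entry (sda and SIGP control) from the basic SCA
 * into the newly allocated extended SCA; under the sca_lock write lock every
 * vcpu's scaoh/scaol is then re-pointed at the new block and ECB2_ESCA is
 * set before kvm->arch.sca and use_esca are updated. sca_can_add_vcpu()
 * triggers the switch lazily, only while the VM still uses the basic format.
 */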
3678 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
3679 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3680 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
3681 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3687 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
3688 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3689 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
3690 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
3691 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3697 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
3698 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3705 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3707 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3724 /* set the cpu timer - may only be called from the VCPU thread itself */
3728 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3729 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3730 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3731 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3732 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3736 /* update and get the cpu timer - can also be called from other VCPU threads */
3742 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3743 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3747 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3752 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3753 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3755 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3756 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3757 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
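/*
 * The guest CPU timer is a down-counting register: while accounting is
 * running, the value still owed to the guest is the stored cputm minus the
 * TOD time that has passed since cputm_start was taken (compare
 * __stop_cpu_timer_accounting() above). kvm_s390_get_cpu_timer() recomputes
 * that difference under the cputm seqcount, so readers on other CPUs simply
 * retry if the VCPU thread updates the timer concurrently.
 */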
3766 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3768 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3773 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3774 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3782 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3784 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3785 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3787 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3788 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3789 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3792 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3793 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3798 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3828 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3831 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3832 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3833 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3834 vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC); in kvm_s390_vcpu_crypto_setup()
3836 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3837 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3840 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3841 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3843 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3844 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3845 if (kvm_has_pckmo_hmac(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3846 vcpu->arch.sie_block->ecd |= ECD_HMAC; in kvm_s390_vcpu_crypto_setup()
3849 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3850 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3855 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); in kvm_s390_vcpu_unsetup_cmma()
3856 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3864 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3866 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); in kvm_s390_vcpu_setup_cmma()
3872 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3874 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3875 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3876 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); in kvm_s390_vcpu_setup_model()
3884 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3888 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3890 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3897 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3898 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3899 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3900 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3901 vcpu->arch.sie_block->ecb |= ECB_PTF; in kvm_s390_vcpu_setup()
3902 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3903 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3904 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3905 vcpu->arch.sie_block->ecb |= ECB_SPECI; in kvm_s390_vcpu_setup()
3907 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3908 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3909 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3910 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3911 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3913 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3915 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3917 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3919 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3920 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3921 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3922 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3924 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3925 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3926 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3927 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3928 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3929 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3930 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3931 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3933 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; in kvm_s390_vcpu_setup()
3934 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); in kvm_s390_vcpu_setup()
3939 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3941 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3946 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3947 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3949 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3955 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3956 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3961 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3969 return -EINVAL; in kvm_arch_vcpu_precreate()
3981 return -ENOMEM; in kvm_arch_vcpu_create()
3983 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3984 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); in kvm_arch_vcpu_create()
3987 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3988 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3990 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3991 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3992 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3993 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3995 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3997 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
4004 vcpu->arch.acrs_loaded = false; in kvm_arch_vcpu_create()
4006 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
4007 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
4008 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
4009 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
4010 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
4011 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
4012 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
4013 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
4018 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
4020 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
4022 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
4028 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
4029 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
4030 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
4036 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
4040 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
4041 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
4043 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
4049 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4055 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
4060 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
4066 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
4071 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
4077 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
4083 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
4094 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
4108 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
4110 unsigned long prefix; in kvm_gmap_notifier() local
4118 /* We are only interested in prefix pages */ in kvm_gmap_notifier()
4121 /* match against both prefix pages */ in kvm_gmap_notifier()
4122 prefix = kvm_s390_get_prefix(vcpu); in kvm_gmap_notifier()
4123 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
4124 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
4134 if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >= in kvm_arch_no_poll()
4136 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
4152 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
4154 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
4156 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
4157 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4160 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
4161 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4165 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4168 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
4169 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4172 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
4173 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4176 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
4177 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4180 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
4181 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4184 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
4185 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4188 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
4189 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4201 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
4204 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
4206 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
4207 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4210 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
4211 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4214 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4219 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
4220 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4223 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
4224 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4225 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
4229 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
4230 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4233 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
4234 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4237 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
4238 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4241 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
4242 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
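/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * registers handled by kvm_arch_vcpu_ioctl_get_one_reg()/_set_one_reg()
 * above are reached through the generic ONE_REG ioctls. Assumes vcpu_fd was
 * obtained via KVM_CREATE_VCPU and that the KVM_REG_S390_* ids come from the
 * s390 uapi header pulled in by <linux/kvm.h>.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_todpr(int vcpu_fd, uint32_t *todpr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_TODPR,		/* 32-bit TOD programmable register */
		.addr = (uint64_t)(unsigned long)todpr,	/* kernel put_user()s into this */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success, -1/errno on failure */
}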
4253 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
4254 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
4255 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
4258 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4272 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4273 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4276 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4277 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
4278 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4279 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4282 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
4283 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4284 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4285 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4286 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4287 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4288 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4289 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4290 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4291 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4292 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4293 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4300 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4301 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4302 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
4303 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4309 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
4314 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
4315 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4316 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4317 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
4319 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4320 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4326 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
4334 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
4344 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
4345 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
4356 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
4357 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
4369 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
4371 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
4372 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
4374 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
4385 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
4386 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
4388 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
4389 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
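/*
 * The conversion helpers above exist because, with the vector facility, the
 * 16 floating point registers are architecturally the leftmost 64 bits of
 * vector registers 0-15: user space always sees plain fprs in struct
 * kvm_fpu, while the kernel keeps the full vrs in the sync area whenever
 * vectors are in use.
 */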
4400 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
4402 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
4403 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
4411 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
4425 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4428 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4429 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4433 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4437 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4438 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
4442 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
4446 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4450 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4482 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
4483 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4485 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
4494 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4502 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4516 struct kvm *kvm = gmap->private; in __kvm_s390_fixup_fault_sync()
4529 rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); in __kvm_s390_fixup_fault_sync()
4536 * __kvm_s390_mprotect_many() - Apply specified protection to guest pages
4543 * Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
4545 * Context: kvm->srcu and gmap->mm need to be held in read mode
4556 if (rc == -EAGAIN) { in __kvm_s390_mprotect_many()
4572 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_mprotect_notify_prefix()
4573 mmap_read_lock(vcpu->arch.gmap->mm); in kvm_s390_mprotect_notify_prefix()
4575 rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT); in kvm_s390_mprotect_notify_prefix()
4577 mmap_read_unlock(vcpu->arch.gmap->mm); in kvm_s390_mprotect_notify_prefix()
4578 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_s390_mprotect_notify_prefix()
4590 * If the guest prefix changed, re-arm the ipte notifier for the in kvm_s390_handle_requests()
4591 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock. in kvm_s390_handle_requests()
4608 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
4614 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
4622 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
4629 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
4639 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
4645 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
4648 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4649 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4650 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
4670 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4671 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4673 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4674 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4675 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4680 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4681 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
4690 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4693 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
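/*
 * Illustrative sketch only (not kernel code): the borrow handling used above
 * when the guest epoch is computed as a 128-bit value made of the epoch
 * index (high part) and the epoch (low 64 bits). If the unsigned subtraction
 * of the host TOD wraps, the result is larger than the minuend, so one is
 * borrowed from the index. All values below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t guest_tod = 0x1000, host_tod = 0x2000;	/* guest TOD "behind" host TOD */
	uint8_t guest_idx = 1, host_idx = 0;

	uint64_t epoch = guest_tod - host_tod;		/* wraps below zero */
	uint8_t epdx = guest_idx - host_idx;

	if (epoch > guest_tod)				/* wrap detected -> borrow */
		epdx -= 1;

	printf("epoch=0x%016llx epdx=0x%02x\n",
	       (unsigned long long)epoch, (unsigned)epdx);
	return 0;
}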
4710 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4717 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4718 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4726 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
4727 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
4750 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
4752 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
4753 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
4759 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
4761 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4764 hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr); in kvm_arch_setup_async_pf()
4765 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4768 return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch); in kvm_arch_setup_async_pf()
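/*
 * kvm_arch_setup_async_pf() above only arms an asynchronous fault when the
 * guest has handed KVM a valid pfault token, the current PSW mask matches
 * the guest's pfault select/compare values, the service-signal subclass is
 * enabled in CR0 and pfaults were not disabled for this gmap; otherwise the
 * fault is resolved synchronously by the caller.
 */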
4782 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4783 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4788 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4803 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4805 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4806 current->thread.gmap_int_code = 0; in vcpu_pre_run()
4807 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4833 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_addressing_exception()
4838 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_addressing_exception()
4842 pgm_info = vcpu->arch.pgm; in vcpu_post_run_addressing_exception()
4852 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm, in kvm_s390_assert_primary_as()
4854 current->thread.gmap_int_code, current->thread.gmap_teid.val); in kvm_s390_assert_primary_as()
4858 * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
4878 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) in __kvm_s390_handle_dat_fault()
4882 if (vcpu->arch.gmap->pfault_enabled) in __kvm_s390_handle_dat_fault()
4894 return -EAGAIN; in __kvm_s390_handle_dat_fault()
4896 /* Needs I/O, try to set up async pfault (only possible with FOLL_NOWAIT) */ in __kvm_s390_handle_dat_fault()
4901 vcpu->stat.pfault_sync++; in __kvm_s390_handle_dat_fault()
4902 /* Could not set up async pfault, try again synchronously */ in __kvm_s390_handle_dat_fault()
4908 return -EFAULT; in __kvm_s390_handle_dat_fault()
4911 mmap_read_lock(vcpu->arch.gmap->mm); in __kvm_s390_handle_dat_fault()
4913 rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked); in __kvm_s390_handle_dat_fault()
4915 rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr); in __kvm_s390_handle_dat_fault()
4916 scoped_guard(spinlock, &vcpu->kvm->mmu_lock) { in __kvm_s390_handle_dat_fault()
4917 kvm_release_faultin_page(vcpu->kvm, page, false, writable); in __kvm_s390_handle_dat_fault()
4919 mmap_read_unlock(vcpu->arch.gmap->mm); in __kvm_s390_handle_dat_fault()
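/*
 * Fallback order in __kvm_s390_handle_dat_fault() above: when the page needs
 * I/O and pfaults are enabled for the gmap, an asynchronous pfault is
 * attempted first (the guest is notified through the pfault token and can
 * keep running); if that cannot be set up, the fault is resolved
 * synchronously with fixup_user_fault() and the page is then linked into
 * the guest address space with __gmap_link().
 */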
4929 if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_dat_fault_handler()
4931 * This translates the per-vCPU guest address into a in vcpu_dat_fault_handler()
4937 mmap_read_lock(vcpu->arch.gmap->mm); in vcpu_dat_fault_handler()
4938 gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr); in vcpu_dat_fault_handler()
4939 mmap_read_unlock(vcpu->arch.gmap->mm); in vcpu_dat_fault_handler()
4940 if (gaddr_tmp == -EFAULT) { in vcpu_dat_fault_handler()
4941 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_dat_fault_handler()
4942 vcpu->run->s390_ucontrol.trans_exc_code = gaddr; in vcpu_dat_fault_handler()
4943 vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION; in vcpu_dat_fault_handler()
4944 return -EREMOTE; in vcpu_dat_fault_handler()
4956 gaddr = current->thread.gmap_teid.addr * PAGE_SIZE; in vcpu_post_run_handle_fault()
4960 switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) { in vcpu_post_run_handle_fault()
4962 vcpu->stat.exit_null++; in vcpu_post_run_handle_fault()
4971 if (gmap_convert_to_secure(vcpu->arch.gmap, gaddr) == -EINVAL) in vcpu_post_run_handle_fault()
4983 if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) { in vcpu_post_run_handle_fault()
4993 current->thread.gmap_int_code, current->comm, in vcpu_post_run_handle_fault()
4994 current->pid); in vcpu_post_run_handle_fault()
5008 KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx", in vcpu_post_run_handle_fault()
5009 current->thread.gmap_int_code, current->thread.gmap_teid.val); in vcpu_post_run_handle_fault()
5023 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
5024 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
5029 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
5030 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
5032 if (exit_reason == -EINTR) { in vcpu_post_run()
5034 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
5036 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
5041 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
5044 if (rc != -EOPNOTSUPP) in vcpu_post_run()
5046 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
5047 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
5048 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
5049 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
5050 return -EREMOTE; in vcpu_post_run()
5060 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
5063 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
5083 memcpy(sie_page->pv_grregs, in __vcpu_run()
5084 vcpu->run->s.regs.gprs, in __vcpu_run()
5085 sizeof(sie_page->pv_grregs)); in __vcpu_run()
5087 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
5088 vcpu->run->s.regs.gprs, in __vcpu_run()
5089 vcpu->arch.gmap->asce); in __vcpu_run()
5091 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
5092 sie_page->pv_grregs, in __vcpu_run()
5093 sizeof(sie_page->pv_grregs)); in __vcpu_run()
5096 * that leave the guest state in an "in-between" state in __vcpu_run()
5100 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
5101 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
5102 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
5120 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
5124 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
5125 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
5126 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
5127 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
5128 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
5129 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
5130 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
5131 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
5133 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
5134 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
5135 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
5136 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
5137 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
5140 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
5141 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
5142 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
5143 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); in sync_regs_fmt2()
5149 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
5150 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
5151 riccb->v && in sync_regs_fmt2()
5152 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
5154 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
5157 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
5160 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
5161 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
5162 gscb->gssm && in sync_regs_fmt2()
5163 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
5165 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
5166 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
5167 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
5169 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
5170 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
5171 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
5172 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
5177 if (current->thread.gs_cb) { in sync_regs_fmt2()
5178 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
5179 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
5181 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
5182 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
5183 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
5184 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
5193 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
5195 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
5196 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
5197 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
5198 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
5202 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
5203 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
5204 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
5206 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
5207 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
5208 vcpu->arch.acrs_loaded = true; in sync_regs()
5209 kvm_s390_fpu_load(vcpu->run); in sync_regs()
5218 * (e.g. 112 prefix page not secure). We do this by turning in sync_regs()
5223 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
5224 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
5228 kvm_run->kvm_dirty_regs = 0; in sync_regs()
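/*
 * Illustrative userspace sketch (an assumption, not part of this file): how
 * the dirty-regs handshake that sync_regs() above consumes looks from the
 * other side. Assumes run points at the vCPU's mmap'ed struct kvm_run and
 * vcpu_fd came from KVM_CREATE_VCPU.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_prefix_and_run(int vcpu_fd, struct kvm_run *run, uint64_t prefix)
{
	run->s.regs.prefix = prefix;			/* new guest prefix */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;		/* tell sync_regs() to pick it up */

	return ioctl(vcpu_fd, KVM_RUN, NULL);		/* kernel clears kvm_dirty_regs */
}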
5233 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
5235 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
5236 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
5237 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
5238 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
5239 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
5243 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
5244 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
5245 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
5246 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
5247 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
5249 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
5257 struct kvm_run *kvm_run = vcpu->run; in store_regs()
5259 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
5260 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
5261 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
5262 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
5263 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
5264 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
5265 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
5266 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
5267 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
5268 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
5269 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
5270 vcpu->arch.acrs_loaded = false; in store_regs()
5271 kvm_s390_fpu_store(vcpu->run); in store_regs()
5278 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
5288 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5289 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5291 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
5292 return -EINTR; in kvm_arch_vcpu_ioctl_run()
5294 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
5295 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
5296 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5312 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5316 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
5317 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
5329 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
5330 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
5338 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
5349 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
5358 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5359 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5372 return -EFAULT; in kvm_s390_store_status_unloaded()
5376 return -EFAULT; in kvm_s390_store_status_unloaded()
5379 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
5383 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
5388 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
5391 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
5393 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
5397 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
5399 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
5403 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
5407 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
5409 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
5410 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
5420 kvm_s390_fpu_store(vcpu->run); in kvm_s390_vcpu_store_status()
5421 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
5457 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
5459 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5460 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5466 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5472 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5477 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
5485 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5495 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
5501 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5513 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
5515 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5516 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5522 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5539 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5555 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5564 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
5565 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5567 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
5569 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5570 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5571 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5572 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5577 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5586 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_sida_op()
5590 if (mop->flags || !mop->size) in kvm_s390_vcpu_sida_op()
5591 return -EINVAL; in kvm_s390_vcpu_sida_op()
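	/* an overflow check: if size + sida_offset wraps, the sum is smaller than size */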
5592 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_vcpu_sida_op()
5593 return -EINVAL; in kvm_s390_vcpu_sida_op()
5594 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_vcpu_sida_op()
5595 return -E2BIG; in kvm_s390_vcpu_sida_op()
5597 return -EINVAL; in kvm_s390_vcpu_sida_op()
5599 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; in kvm_s390_vcpu_sida_op()
5601 switch (mop->op) { in kvm_s390_vcpu_sida_op()
5603 if (copy_to_user(uaddr, sida_addr, mop->size)) in kvm_s390_vcpu_sida_op()
5604 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5608 if (copy_from_user(sida_addr, uaddr, mop->size)) in kvm_s390_vcpu_sida_op()
5609 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5618 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_mem_op()
5628 if (mop->ar >= NUM_ACRS) in kvm_s390_vcpu_mem_op()
5629 return -EINVAL; in kvm_s390_vcpu_mem_op()
5631 return -EINVAL; in kvm_s390_vcpu_mem_op()
5632 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vcpu_mem_op()
5633 tmpbuf = vmalloc(mop->size); in kvm_s390_vcpu_mem_op()
5635 return -ENOMEM; in kvm_s390_vcpu_mem_op()
5638 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vcpu_mem_op()
5639 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vcpu_mem_op()
5640 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, in kvm_s390_vcpu_mem_op()
5641 acc_mode, mop->key); in kvm_s390_vcpu_mem_op()
5645 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5646 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5649 if (copy_to_user(uaddr, tmpbuf, mop->size)) { in kvm_s390_vcpu_mem_op()
5650 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5654 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vcpu_mem_op()
5655 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5658 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5659 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5663 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_vcpu_mem_op()
5664 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_vcpu_mem_op()
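/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * driving kvm_s390_vcpu_mem_op() above through the KVM_S390_MEM_OP vcpu
 * ioctl to read guest logical memory. Assumes vcpu_fd came from
 * KVM_CREATE_VCPU; error handling is left to the caller.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t size)
{
	struct kvm_s390_mem_op op = {
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.gaddr = gaddr,				/* guest logical address */
		.buf   = (uint64_t)(unsigned long)buf,	/* userspace destination buffer */
		.size  = size,
		.ar    = 0,				/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}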
5676 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5678 switch (mop->op) { in kvm_s390_vcpu_memsida_op()
5685 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_vcpu_memsida_op()
5689 r = -EINVAL; in kvm_s390_vcpu_memsida_op()
5692 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5699 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
5708 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5717 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5719 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
5724 rc = -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
5729 * To simplify single stepping of userspace-emulated instructions, in kvm_arch_vcpu_async_ioctl()
5736 vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; in kvm_arch_vcpu_async_ioctl()
5749 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5750 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5752 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp))) in kvm_s390_handle_pv_vcpu_dump()
5753 return -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5757 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5761 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5765 return -ENOMEM; in kvm_s390_handle_pv_vcpu_dump()
5767 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5770 vcpu->vcpu_id, cmd->rc, cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5773 ret = -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5777 ret = -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5786 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
5796 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5798 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5803 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5843 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5846 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5860 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5864 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5865 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5869 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
5877 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5881 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5882 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5886 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
5892 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5894 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5900 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5912 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5918 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5924 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5936 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5940 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5952 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5956 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5960 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5973 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5977 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
5987 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
5988 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5989 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
5990 get_page(vmf->page); in kvm_arch_vcpu_fault()
6010 if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS) in kvm_arch_prepare_memory_region()
6011 return -EINVAL; in kvm_arch_prepare_memory_region()
6015 return -EINVAL; in kvm_arch_prepare_memory_region()
6025 if (new->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
6026 return -EINVAL; in kvm_arch_prepare_memory_region()
6028 size = new->npages * PAGE_SIZE; in kvm_arch_prepare_memory_region()
6030 return -EINVAL; in kvm_arch_prepare_memory_region()
6032 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
6033 return -EINVAL; in kvm_arch_prepare_memory_region()
6036 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
6041 * - userspace creates a new memslot with dirty logging off, in kvm_arch_prepare_memory_region()
6042 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and in kvm_arch_prepare_memory_region()
6048 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_arch_prepare_memory_region()
6067 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6068 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6071 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6072 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6077 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
6078 new->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6079 new->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6104 return -ENODEV; in kvm_s390_init()
6109 return -EINVAL; in kvm_s390_init()