// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "kvm-s390: " fmt

#include <linux/entry-virt.h>

#include <asm/access-regs.h>
#include <asm/asm-offsets.h>

#include "kvm-s390.h"

#include "trace-s390.h"

static u8 halt_poll_max_steal = 10;

 * the feature is opt-in anyway

 * defines in FACILITIES_KVM and the non-hypervisor managed bits.

/* in kvm_clock_sync_scb(): */
	 * -delta to the epoch.

	delta = -delta;

	/* sign-extension - we're adding to signed values below */

	delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
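/*
 * Illustrative sketch, not part of kvm-s390.c: the ECD_MEF branch above
 * treats (epdx, epoch) as one signed 128-bit epoch. Adding a signed
 * 64-bit delta means adding its sign extension (0 or -1) to the high
 * word and propagating the carry out of the low word; with unsigned
 * addition, a carry occurred exactly when the new low word is smaller
 * than the addend. A minimal helper (u64/s64 as in the kernel):
 */
static inline void epoch128_add(u64 *epdx, u64 *epoch, u64 delta)
{
	u64 delta_idx = (s64)delta < 0 ? -1ULL : 0;	/* sign extension */

	*epoch += delta;
	*epdx += delta_idx;
	if (*epoch < delta)	/* carry out of the low 64 bits */
		*epdx += 1;
}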
/* in kvm_clock_sync(): */
	kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);

	kvm->arch.epoch = vcpu->arch.sie_block->epoch;
	kvm->arch.epdx = vcpu->arch.sie_block->epdx;

	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start += *delta;
	if (vcpu->arch.vsie_block)
		kvm_clock_sync_scb(vcpu->arch.vsie_block,

/* in kvm_s390_cpu_feat_init(): */
	if (test_facility(28)) /* TOD-clock steering */

/* in __kvm_s390_init(): */
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));

	return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));

/* in kvm_arch_dev_ioctl(): */
	return -EINVAL;

/* in kvm_arch_sync_dirty_log(): */
	struct gmap *gmap = kvm->arch.gmap;

	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;

/* in kvm_vm_ioctl_get_dirty_log(): */
	return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)

	memset(memslot->dirty_bitmap, 0, n);

	mutex_unlock(&kvm->slots_lock);

/* in kvm_vm_ioctl_enable_cap(): */
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {

	kvm->arch.use_irqchip = 1;

	kvm->arch.user_sigp = 1;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		r = -EBUSY;

	set_kvm_facility(kvm->arch.model.fac_mask, 129);
	set_kvm_facility(kvm->arch.model.fac_list, 129);

	set_kvm_facility(kvm->arch.model.fac_mask, 134);
	set_kvm_facility(kvm->arch.model.fac_list, 134);

	set_kvm_facility(kvm->arch.model.fac_mask, 135);
	set_kvm_facility(kvm->arch.model.fac_list, 135);

	set_kvm_facility(kvm->arch.model.fac_mask, 148);
	set_kvm_facility(kvm->arch.model.fac_list, 148);

	set_kvm_facility(kvm->arch.model.fac_mask, 152);
	set_kvm_facility(kvm->arch.model.fac_list, 152);

	set_kvm_facility(kvm->arch.model.fac_mask, 192);
	set_kvm_facility(kvm->arch.model.fac_list, 192);

	set_kvm_facility(kvm->arch.model.fac_mask, 198);
	set_kvm_facility(kvm->arch.model.fac_list, 198);

	set_kvm_facility(kvm->arch.model.fac_mask, 199);
	set_kvm_facility(kvm->arch.model.fac_list, 199);

	r = -EINVAL;
	mutex_unlock(&kvm->lock);

	r = -EINVAL;
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		r = -EBUSY;

	set_kvm_facility(kvm->arch.model.fac_mask, 64);
	set_kvm_facility(kvm->arch.model.fac_list, 64);

	mutex_unlock(&kvm->lock);

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		r = -EBUSY;

	set_kvm_facility(kvm->arch.model.fac_mask, 72);
	set_kvm_facility(kvm->arch.model.fac_list, 72);

	mutex_unlock(&kvm->lock);

	r = -EINVAL;
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		r = -EBUSY;

	set_kvm_facility(kvm->arch.model.fac_mask, 133);
	set_kvm_facility(kvm->arch.model.fac_list, 133);

	mutex_unlock(&kvm->lock);

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus)
		r = -EBUSY;
	else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
		r = -EINVAL;

	mmap_write_lock(kvm->mm);
	kvm->mm->context.allow_gmap_hpage_1m = 1;
	mmap_write_unlock(kvm->mm);

	kvm->arch.use_skf = 0;
	kvm->arch.use_pfmfi = 0;

	mutex_unlock(&kvm->lock);

	kvm->arch.user_stsi = 1;

	kvm->arch.user_instr0 = 1;

	r = -EINVAL;
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		r = -EBUSY;

	set_kvm_facility(kvm->arch.model.fac_mask, 11);
	set_kvm_facility(kvm->arch.model.fac_list, 11);

	mutex_unlock(&kvm->lock);

	kvm->arch.user_operexec = 1;

	r = -EINVAL;

/* in kvm_s390_get_mem_control(): */
	switch (attr->attr) {

		kvm->arch.mem_limit);
	if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
		ret = -EFAULT;

	ret = -ENXIO;

/* in kvm_s390_set_mem_control(): */
	switch (attr->attr) {

	ret = -ENXIO;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus)
		ret = -EBUSY;
	else if (kvm->mm->context.allow_gmap_hpage_1m)
		ret = -EINVAL;

	kvm->arch.use_cmma = 1;

	kvm->arch.use_pfmfi = 0;

	mutex_unlock(&kvm->lock);

	ret = -ENXIO;

	ret = -EINVAL;
	if (!kvm->arch.use_cmma)

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);
	s390_reset_cmma(kvm->arch.gmap->mm);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);

	return -EINVAL;

	if (get_user(new_limit, (u64 __user *)attr->addr))
		return -EFAULT;

	if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
	    new_limit > kvm->arch.mem_limit)
		return -E2BIG;

	return -EINVAL;

	new_limit -= 1;

	ret = -EBUSY;
	mutex_lock(&kvm->lock);
	if (!kvm->created_vcpus) {

	struct gmap *new = gmap_create(current->mm, new_limit);

	ret = -ENOMEM;

	gmap_remove(kvm->arch.gmap);
	new->private = kvm;
	kvm->arch.gmap = new;

	mutex_unlock(&kvm->lock);

		(void *) kvm->arch.gmap->asce);

	ret = -ENXIO;

/* in kvm_s390_vm_set_crypto(): */
	mutex_lock(&kvm->lock);
	switch (attr->attr) {

	mutex_unlock(&kvm->lock);
	return -EINVAL;

		kvm->arch.crypto.crycb->aes_wrapping_key_mask,
		sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	kvm->arch.crypto.aes_kw = 1;

	mutex_unlock(&kvm->lock);
	return -EINVAL;

		kvm->arch.crypto.crycb->dea_wrapping_key_mask,
		sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
	kvm->arch.crypto.dea_kw = 1;

	mutex_unlock(&kvm->lock);
	return -EINVAL;

	kvm->arch.crypto.aes_kw = 0;
	memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
	       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));

	mutex_unlock(&kvm->lock);
	return -EINVAL;

	kvm->arch.crypto.dea_kw = 0;
	memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
	       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	mutex_unlock(&kvm->lock);
	return -EOPNOTSUPP;

	kvm->arch.crypto.apie = 1;

	mutex_unlock(&kvm->lock);
	return -EOPNOTSUPP;

	kvm->arch.crypto.apie = 0;

	mutex_unlock(&kvm->lock);
	return -ENXIO;

	mutex_unlock(&kvm->lock);

/* in kvm_s390_vcpu_pci_setup(): */
	if (!vcpu->kvm->arch.use_zpci_interp)

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;

/* in kvm_s390_vcpu_pci_enable_interp(): */
	lockdep_assert_held(&kvm->lock);

	kvm->arch.use_zpci_interp = 1;

/* in kvm_s390_vm_start_migration(): */
/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
	if (kvm->arch.migration_mode)

	return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;

	if (!ms->dirty_bitmap)
		return -EINVAL;

	ram_pages += ms->npages;

	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;

/* in kvm_s390_vm_stop_migration(): */
 * Must be called with kvm->slots_lock to avoid races with ourselves and

	if (!kvm->arch.migration_mode)

	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)

/* in kvm_s390_vm_set_migration(): */
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {

	mutex_unlock(&kvm->slots_lock);

/* in kvm_s390_vm_get_migration(): */
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;

/* in kvm_s390_set_tod_ext(): */
	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	return -EINVAL;

/* in kvm_s390_set_tod_high(): */
	if (copy_from_user(&gtod_high, (void __user *)attr->addr,

		return -EFAULT;

	return -EINVAL;

/* in kvm_s390_set_tod_low(): */
	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,

		return -EFAULT;

/* in kvm_s390_set_tod(): */
	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	ret = -EOPNOTSUPP;

	switch (attr->attr) {

	ret = -ENXIO;

	mutex_unlock(&kvm->lock);

/* in kvm_s390_get_tod_clock(): */
	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;

	gtod->epoch_idx = clk.ei + kvm->arch.epdx;
	if (gtod->tod < clk.tod)
		gtod->epoch_idx += 1;

/* in kvm_s390_get_tod_ext(): */
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

/* in kvm_s390_get_tod_high(): */
	if (copy_to_user((void __user *)attr->addr, &gtod_high,

		return -EFAULT;

/* in kvm_s390_get_tod_low(): */
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

/* in kvm_s390_get_tod(): */
	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {

	ret = -ENXIO;

/* in kvm_s390_set_processor(): */
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;

	ret = -ENOMEM;

	if (!copy_from_user(proc, (void __user *)attr->addr,

	kvm->arch.model.cpuid = proc->cpuid;

	if (lowest_ibc && proc->ibc) {
		if (proc->ibc > unblocked_ibc)
			kvm->arch.model.ibc = unblocked_ibc;
		else if (proc->ibc < lowest_ibc)
			kvm->arch.model.ibc = lowest_ibc;

	kvm->arch.model.ibc = proc->ibc;

	memcpy(kvm->arch.model.fac_list, proc->fac_list,

		kvm->arch.model.ibc,
		kvm->arch.model.cpuid);

		kvm->arch.model.fac_list[0],
		kvm->arch.model.fac_list[1],
		kvm->arch.model.fac_list[2]);

	ret = -EFAULT;

	mutex_unlock(&kvm->lock);

/* in kvm_s390_set_processor_feat(): */
	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;

	return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;

	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);

/* in kvm_s390_set_processor_subfunc(): */
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,

		mutex_unlock(&kvm->lock);
		return -EFAULT;

	mutex_unlock(&kvm->lock);

		((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);

		((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);

		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

/* in kvm_s390_set_uv_feat(): */
	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;

	if (get_user(data, &ptr->feat))
		return -EFAULT;

	return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;

	kvm->arch.model.uv_feat_guest.feat = data;
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);

/* in kvm_s390_set_cpu_model(): */
	int ret = -ENXIO;

	switch (attr->attr) {

/* in kvm_s390_get_processor(): */
	ret = -ENOMEM;

	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,

		kvm->arch.model.ibc,
		kvm->arch.model.cpuid);

		kvm->arch.model.fac_list[0],
		kvm->arch.model.fac_list[1],
		kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;

/* in kvm_s390_get_machine(): */
	ret = -ENOMEM;

	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,

	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,

		kvm->arch.model.ibc,
		kvm->arch.model.cpuid);

		mach->fac_mask[0],
		mach->fac_mask[1],
		mach->fac_mask[2]);

		mach->fac_list[0],
		mach->fac_list[1],
		mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;

/* in kvm_s390_get_processor_feat(): */
	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;

/* in kvm_s390_get_machine_feat(): */
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;

/* in kvm_s390_get_processor_subfunc(): */
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,

		return -EFAULT;

		((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);

		((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);

		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);

		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

/* in kvm_s390_get_machine_subfunc(): */
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,

		return -EFAULT;

/* in kvm_s390_get_processor_uv_feat(): */
	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;

	if (put_user(feat, &dst->feat))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);

/* in kvm_s390_get_machine_uv_feat(): */
	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;

	if (put_user(feat, &dst->feat))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);

/* in kvm_s390_get_cpu_model(): */
	int ret = -ENXIO;

	switch (attr->attr) {

/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 */
	sca = kvm->arch.sca;
	old = READ_ONCE(sca->utility);

	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
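/*
 * Illustrative sketch, not from kvm-s390.c: the READ_ONCE()/try_cmpxchg()
 * loop above is the usual lock-free read-modify-write pattern. On failure
 * try_cmpxchg() updates "old" with the value it found, so the loop only
 * has to recompute "new" and retry. A hypothetical helper that sets a
 * flag in a shared word:
 */
static void set_flag_lockless(unsigned short *word, unsigned short flag)
{
	unsigned short old = READ_ONCE(*word);
	unsigned short new;

	do {
		new = old | flag;	/* recompute from the latest value */
	} while (!try_cmpxchg(word, &old, new));
}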
/* in kvm_s390_set_topo_change_indication(): */
	return -ENXIO;

	kvm_s390_update_topology_change_report(kvm, !!attr->attr);

/* in kvm_s390_get_topo_change_indication(): */
	return -ENXIO;

	topo = kvm->arch.sca->utility.mtcr;

	return put_user(topo, (u8 __user *)attr->addr);

/* in kvm_s390_vm_set_attr(): */
	switch (attr->group) {

	ret = -ENXIO;

/* in kvm_s390_vm_get_attr(): */
	switch (attr->group) {

	ret = -ENXIO;

/* in kvm_s390_vm_has_attr(): */
	switch (attr->group) {

	switch (attr->attr) {

	ret = sclp.has_cmma ? 0 : -ENXIO;

	ret = -ENXIO;

	switch (attr->attr) {

	ret = -ENXIO;

	switch (attr->attr) {

	ret = -ENXIO;

	switch (attr->attr) {

	ret = ap_instructions_available() ? 0 : -ENXIO;

	ret = -ENXIO;

	ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;

	ret = -ENXIO;

/* in kvm_s390_get_skeys(): */
	if (args->flags != 0)
		return -EINVAL;

	if (!mm_uses_skeys(current->mm))

	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);

		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);

		r = -EFAULT;

		r = get_guest_storage_key(current->mm, hva, &keys[i]);

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);

	r = -EFAULT;

/* in kvm_s390_set_skeys(): */
	if (args->flags != 0)
		return -EINVAL;

	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);

		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);

	r = -EFAULT;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {

		hva = gfn_to_hva(kvm, args->start_gfn + i);

		r = -EFAULT;

		r = -EINVAL;

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);

		r = fixup_user_fault(current->mm, hva,

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

/* in kvm_s390_peek_cmma(): */
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {

		return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)

		res[args->count++] = (pgstev >> 24) & 0x43;

/* in kvm_s390_next_dirty_cmma(): */
	unsigned long ofs = cur_gfn - ms->base_gfn;
	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];

	if (ms->base_gfn + ms->npages <= cur_gfn) {

		mnode = rb_first(&slots->gfn_tree);

		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);

	if (cur_gfn < ms->base_gfn)

	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);

	return ms->base_gfn + ofs;
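/*
 * Illustrative sketch, simplified to a single memslot: the walk above maps
 * a gfn to a bit offset in the slot's per-migration bitmap, scans forward
 * with find_next_bit(), and only moves on to the next rb-tree node when
 * the scan runs off the end of the slot. For one slot the core reduces to:
 */
static unsigned long next_dirty_gfn(unsigned long *bitmap,
				    unsigned long base_gfn,
				    unsigned long npages,
				    unsigned long cur_gfn)
{
	unsigned long ofs = cur_gfn < base_gfn ? 0 : cur_gfn - base_gfn;

	ofs = find_next_bit(bitmap, npages, ofs);
	return base_gfn + ofs;	/* base_gfn + npages when nothing is set */
}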
/* in kvm_s390_get_cmma(): */
	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);

	args->count = 0;
	args->start_gfn = cur_gfn;

	while (args->count < bufsize) {

		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)

		res[args->count++] = (pgstev >> 24) & 0x43;

			(next_gfn - args->start_gfn >= bufsize))

		if (cur_gfn - ms->base_gfn >= ms->npages) {

/* in kvm_s390_get_cmma_bits(): */
	if (!kvm->arch.use_cmma)
		return -ENXIO;

	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;

	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;

	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {

	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {

		return -ENOMEM;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);

		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

/* in kvm_s390_set_cmma_bits(): */
 * set and the mm->context.uses_cmm flag is set.

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;

	if (args->count == 0)

	bits = vmalloc(array_size(sizeof(*bits), args->count));

		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);

		r = -EFAULT;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);

		r = -EFAULT;

		set_pgste_bits(kvm->mm, hva, mask, pgstev);

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);

/**
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 *
 * Return: 0 in case of success, otherwise -EIO
 */
	mutex_lock(&vcpu->mutex);

	ret = -EIO;

	mutex_unlock(&vcpu->mutex);

	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */

/**
 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
 *
 * Return: 0 in case of success, otherwise -EIO
 */
	mutex_lock(&vcpu->mutex);

	mutex_unlock(&vcpu->mutex);

/* in kvm_s390_handle_pv_info(): */
	switch (info->header.id) {

	len_min = sizeof(info->header) + sizeof(info->vm);

	if (info->header.len_max < len_min)
		return -EINVAL;

	memcpy(info->vm.inst_calls_list,

	info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
	info->vm.max_guests = uv_info.max_num_sec_conf;
	info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
	info->vm.feature_indication = uv_info.uv_feature_indications;

	len_min = sizeof(info->header) + sizeof(info->dump);

	if (info->header.len_max < len_min)
		return -EINVAL;

	info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
	info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
	info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;

	return -EINVAL;
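/*
 * Illustrative sketch with hypothetical struct names: the len_min checks
 * above implement forward-compatible queries. Userspace reports how much
 * it can accept in header.len_max; the kernel refuses to fill a payload
 * that would not fit, so the struct can grow without breaking old callers.
 */
struct ex_hdr { u32 id; u32 len_max; };
struct ex_info { struct ex_hdr header; u64 payload[4]; };

static int ex_fill_info(struct ex_info *info)
{
	size_t len_min = sizeof(info->header) + sizeof(info->payload);

	if (info->header.len_max < len_min)
		return -EINVAL;	/* caller's buffer is too small */
	/* ... fill info->payload ... */
	return 0;
}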
/* in kvm_s390_pv_dmp(): */
	int r = -EINVAL;

	if (kvm->arch.pv.dumping)

		     UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);

		     cmd->rc, cmd->rrc);

	kvm->arch.pv.dumping = true;

	r = -EINVAL;

	if (!kvm->arch.pv.dumping)

		     &cmd->rc, &cmd->rrc);

	if (!kvm->arch.pv.dumping)

	r = -EINVAL;

		     &cmd->rc, &cmd->rrc);

	r = -ENOTTY;

/* in kvm_s390_handle_pv(): */
	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
	void __user *argp = (void __user *)cmd->data;

	mutex_lock(&kvm->lock);

	switch (cmd->cmd) {

	r = -EINVAL;

	mmap_write_lock(kvm->mm);

	mmap_write_unlock(kvm->mm);

	r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);

	r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);

	set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);

	r = -EINVAL;

	r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);

	r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);

	clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);

	r = -EINVAL;

	/* kvm->lock must not be held; this is asserted inside the function. */
	r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);

	r = -EINVAL;

	r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);

	r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);

	clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);

	r = -EINVAL;

	r = -EFAULT;

	r = -EINVAL;

	r = -ENOMEM;

	r = -EFAULT;

		     &cmd->rc, &cmd->rrc);

	r = -EINVAL;
	if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))

	r = -EFAULT;

		     &cmd->rc, &cmd->rrc);

	r = -EINVAL;

		     UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
		     cmd->rrc);

	r = -EINVAL;

		     UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);

		     cmd->rc, cmd->rrc);

	r = -EINVAL;

		     UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);

		     cmd->rc, cmd->rrc);

	r = -EFAULT;

	r = -EINVAL;

	r = -EFAULT;

	r = -EINVAL;

	r = -EFAULT;

	r = -EFAULT;

	r = -ENOTTY;

	mutex_unlock(&kvm->lock);

/* in mem_op_validate_common(): */
	if (mop->flags & ~supported_flags || !mop->size)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
		if (mop->key > 0xf)
			return -EINVAL;

		mop->key = 0;

/* in kvm_s390_vm_mem_op_abs(): */
	void __user *uaddr = (void __user *)mop->buf;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);

		return -ENOMEM;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {

	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);

		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, GACC_FETCH, mop->key);

		if (copy_to_user(uaddr, tmpbuf, mop->size))
			r = -EFAULT;

		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;

		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, GACC_STORE, mop->key);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

/* in kvm_s390_vm_mem_op_cmpxchg(): */
	void __user *uaddr = (void __user *)mop->buf;
	void __user *old_addr = (void __user *)mop->old_addr;

	unsigned int off_in_quad = sizeof(new) - mop->size;

	if (mop->size > sizeof(new))
		return -EINVAL;
	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
		return -EFAULT;
	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
		return -EFAULT;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {

	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
				       new.quad, mop->key, &success);
	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
		r = -EFAULT;

	srcu_read_unlock(&kvm->srcu, srcu_idx);
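/*
 * Illustrative sketch, hypothetical helper: off_in_quad = sizeof(new) -
 * mop->size right-aligns a 1/2/4/8/16-byte operand inside the 16-byte
 * quadword that cmpxchg_guest_abs_with_key() works on. On big-endian
 * s390 the value's bytes occupy raw[16 - size .. 15]:
 */
static void embed_right_aligned(unsigned char raw[16], const void *val,
				unsigned int size)
{
	unsigned int off_in_quad = 16 - size;

	memset(raw, 0, off_in_quad);		/* leading bytes stay zero */
	memcpy(raw + off_in_quad, val, size);	/* value in the low end */
}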
3000 * This is technically a heuristic only, if the kvm->lock is not in kvm_s390_vm_mem_op()
3001 * taken, it is not guaranteed that the vm is/remains non-protected. in kvm_s390_vm_mem_op()
3003 * on the access, -EFAULT is returned and the vm may crash the in kvm_s390_vm_mem_op()
3009 return -EINVAL; in kvm_s390_vm_mem_op()
3011 switch (mop->op) { in kvm_s390_vm_mem_op()
3018 return -EINVAL; in kvm_s390_vm_mem_op()
3024 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
3033 r = -EFAULT; in kvm_arch_vm_ioctl()
3040 r = -EINVAL; in kvm_arch_vm_ioctl()
3041 if (kvm->arch.use_irqchip) in kvm_arch_vm_ioctl()
3046 r = -EFAULT; in kvm_arch_vm_ioctl()
3053 r = -EFAULT; in kvm_arch_vm_ioctl()
3060 r = -EFAULT; in kvm_arch_vm_ioctl()
3069 r = -EFAULT; in kvm_arch_vm_ioctl()
3079 r = -EFAULT; in kvm_arch_vm_ioctl()
3089 r = -EFAULT; in kvm_arch_vm_ioctl()
3092 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3094 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3098 r = -EFAULT; in kvm_arch_vm_ioctl()
3105 r = -EFAULT; in kvm_arch_vm_ioctl()
3108 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3110 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3120 r = -EINVAL; in kvm_arch_vm_ioctl()
3124 r = -EFAULT; in kvm_arch_vm_ioctl()
3128 r = -EINVAL; in kvm_arch_vm_ioctl()
3131 /* must be called without kvm->lock */ in kvm_arch_vm_ioctl()
3134 r = -EFAULT; in kvm_arch_vm_ioctl()
3145 r = -EFAULT; in kvm_arch_vm_ioctl()
3151 r = -EINVAL; in kvm_arch_vm_ioctl()
3155 r = -EFAULT; in kvm_arch_vm_ioctl()
3162 r = -ENOTTY; in kvm_arch_vm_ioctl()
3190 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb); in kvm_s390_set_crycb_format()
3192 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
3193 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3200 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3202 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3217 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3223 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3227 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3229 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
3232 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
3235 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
3241 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
3242 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
3243 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
3267 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3274 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3275 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3276 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3277 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3297 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3299 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3305 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3306 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3307 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3308 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3309 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3310 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3315 free_pages_exact(kvm->arch.sca, sizeof(*kvm->arch.sca)); in sca_dispose()
3316 kvm->arch.sca = NULL; in sca_dispose()
3333 rc = -EINVAL; in kvm_arch_init_vm()
3348 rc = -ENOMEM; in kvm_arch_init_vm()
3354 kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags); in kvm_arch_init_vm()
3356 if (!kvm->arch.sca) in kvm_arch_init_vm()
3359 snprintf(debug_name, sizeof(debug_name), "kvm-%u", current->pid); in kvm_arch_init_vm()
3361 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3362 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3366 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3368 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3371 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3372 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3375 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3378 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3381 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3383 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
3384 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3385 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3387 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3388 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3390 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3391 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3395 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3397 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3398 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3400 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3405 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3408 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3411 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3412 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3414 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3415 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3416 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3418 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3430 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3431 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3432 /* one flat fake memslot covering the whole address-space */ in kvm_arch_init_vm()
3433 mutex_lock(&kvm->slots_lock); in kvm_arch_init_vm()
3435 mutex_unlock(&kvm->slots_lock); in kvm_arch_init_vm()
3438 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3440 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3442 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3443 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3445 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3446 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3449 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3450 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3451 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3455 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3456 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3457 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3461 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3462 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3473 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
3476 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3478 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3480 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3481 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
3483 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3488 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
3499 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
3510 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3511 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3513 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3514 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3516 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3526 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
3527 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
3528 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
3529 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3536 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3541 clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn); in sca_del_vcpu()
3542 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3547 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3551 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3552 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; in sca_add_vcpu()
3553 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
3558 set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn); in sca_add_vcpu()
3559 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
/* __start_cpu_timer_accounting() */
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);

/* __stop_cpu_timer_accounting() */
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);

/* __enable_cpu_timer_accounting() */
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;

/* __disable_cpu_timer_accounting() */
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = false;
/* set the cpu timer - may only be called from the VCPU thread itself */
/* kvm_s390_set_cpu_timer() */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);

/* update and get the cpu timer - can also be called from other VCPU threads */
/* kvm_s390_get_cpu_timer() */
	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* timer is running - subtract the elapsed delta */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	return value;
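/*
 * Example (not part of this file): a minimal, hedged sketch of the seqcount
 * protocol used by the two functions above. The writer bumps the sequence to
 * an odd value, updates the payload, then bumps it back to even; readers
 * retry whenever they saw an odd sequence or the sequence changed across the
 * read. All names here are illustrative; the kernel's raw_write_seqcount_*
 * and read_seqcount_retry() helpers additionally provide memory barriers and
 * lockdep checks that this sketch glosses over.
 */
#include <stdatomic.h>

struct timer_state {
	atomic_uint seq;		/* even: stable, odd: write in progress */
	unsigned long long cputm;	/* payload protected by seq */
};

static void seq_write(struct timer_state *t, unsigned long long val)
{
	atomic_fetch_add(&t->seq, 1);	/* sequence becomes odd: write begins */
	t->cputm = val;
	atomic_fetch_add(&t->seq, 1);	/* sequence even again: write done */
}

static unsigned long long seq_read(struct timer_state *t)
{
	unsigned int s;
	unsigned long long val;

	do {
		s = atomic_load(&t->seq);
		val = t->cputm;
		/* retry on odd sequence or if a writer raced with us */
	} while ((s & 1) || atomic_load(&t->seq) != s);
	return val;
}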
/* kvm_arch_vcpu_load() */
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;

/* kvm_arch_vcpu_put() */
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);

/* kvm_arch_vcpu_postcreate() */
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
/* kvm_has_pckmo_subfunc() */
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;

/* kvm_s390_vcpu_crypto_setup() */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc/hmac is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
		if (kvm_has_pckmo_hmac(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_HMAC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
/* kvm_s390_vcpu_unsetup_cmma() */
	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
	vcpu->arch.sie_block->cbrlo = 0;

/* kvm_s390_vcpu_setup_cmma() */
	if (!cbrlo_page)
		return -ENOMEM;
	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);

/* kvm_s390_vcpu_setup_model() */
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
/* kvm_s390_vcpu_setup() */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
	/* ... */
	vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 11))
		vcpu->arch.sie_block->ecb |= ECB_PTF;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.sie_block->ecb |= ECB_SPECI;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
	/* ... */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	vcpu->arch.sie_block->hpid = HPID_KVM;
	/* ... */
	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		/* ... */
	}
	mutex_unlock(&vcpu->kvm->lock);
/* kvm_arch_vcpu_precreate() */
	/* ... */
		return -EINVAL;

/* kvm_arch_vcpu_create() */
	/* ... */
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	vcpu->arch.acrs_loaded = false;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/*
	 * fprs can be synchronized via vrs, even if the guest has no vx. With
	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
	 * (A hedged sketch of this fprs/vrs mapping follows this function.)
	 */
	if (cpu_has_vx())
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	/* ... */
	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
	return 0;
	/* ... */
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
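/*
 * Example (not part of this file): a hedged sketch of the fprs/vrs mapping
 * referenced in the comment above. With the vector facility, floating point
 * register i is architecturally the leftmost (high) 64 bits of vector
 * register i, so the 16 FPRs can always be saved and restored in vrs
 * format. Types and names below are illustrative, not the kernel's.
 */
typedef struct {
	unsigned long long high;	/* bits 0-63 of the vector register */
	unsigned long long low;		/* bits 64-127 */
} vreg128;

static void fprs_to_vrs(vreg128 vrs[16], const unsigned long long fprs[16])
{
	for (int i = 0; i < 16; i++)
		vrs[i].high = fprs[i];	/* FPR i == high half of VR i */
}

static void vrs_to_fprs(unsigned long long fprs[16], const vreg128 vrs[16])
{
	for (int i = 0; i < 16; i++)
		fprs[i] = vrs[i].high;
}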
/* kvm_arch_vcpu_runnable() */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
	return kvm_s390_vcpu_has_irq(vcpu, 0);

/* kvm_arch_vcpu_in_kernel() */
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);

/* kvm_s390_vcpu_block() */
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);

/* kvm_s390_vcpu_unblock() */
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);

/* kvm_s390_vcpu_request() */
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);

/* kvm_s390_vcpu_sie_inhibited() */
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);

/* kvm_s390_vcpu_request_handled() */
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);

/* exit_sie() */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();

/* kvm_gmap_notifier() */
	struct kvm *kvm = gmap->private;
	/* ... */
	if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1) {
		VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
			   start, end);
		kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
	}

/* kvm_arch_no_poll() */
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    halt_poll_max_steal) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
/* kvm_arch_vcpu_ioctl_get_one_reg() */
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	}
	return r;

/* kvm_arch_vcpu_ioctl_set_one_reg() */
	int r = -EINVAL;
	u64 val = 0;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	}
	return r;
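/*
 * Example (not part of this file): minimal userspace use of the ONE_REG
 * interface handled by the two functions above. Assumes a vCPU fd obtained
 * via KVM_CREATE_VCPU; error handling is trimmed for brevity.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_cpu_timer(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)val,	/* kernel put_user()s here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_clock_comparator(int vcpu_fd, uint64_t ckc)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_S390_CLOCK_COMP,
		.addr = (uint64_t)(uintptr_t)&ckc,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}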
/* kvm_arch_vcpu_ioctl_normal_reset() */
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
	/* ... */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);

/* kvm_arch_vcpu_ioctl_initial_reset() */
	/* ... */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0;
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
	/* ... */
	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
	vcpu->run->psw_addr = 0;
	vcpu->run->psw_mask = 0;
	vcpu->run->s.regs.todpr = 0;
	vcpu->run->s.regs.cputm = 0;
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.pp = 0;
	vcpu->run->s.regs.gbea = 1;
	vcpu->run->s.regs.fpc = 0;
	/* ... */
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
	vcpu->arch.sie_block->todpr = 0;

/* kvm_arch_vcpu_ioctl_clear_reset() */
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));
	regs->etoken = 0;
	regs->etoken_extension = 0;
/* kvm_arch_vcpu_ioctl_set_regs() */
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));

/* kvm_arch_vcpu_ioctl_get_regs() */
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));

/* kvm_arch_vcpu_ioctl_set_sregs() */
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

/* kvm_arch_vcpu_ioctl_get_sregs() */
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

/* kvm_arch_vcpu_ioctl_set_fpu() */
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (cpu_has_vx())
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

/* kvm_arch_vcpu_ioctl_get_fpu() */
	if (cpu_has_vx())
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

/* kvm_arch_vcpu_ioctl_set_initial_psw() */
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
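/*
 * Example (not part of this file): setting the initial PSW from userspace,
 * which lands in kvm_arch_vcpu_ioctl_set_initial_psw() above. The vCPU must
 * be stopped or the ioctl fails with -EBUSY. The mask value is only an
 * illustration (EA and BA bits for 64-bit addressing); consult the PoP for
 * real setups.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_initial_psw(int vcpu_fd, __u64 start_addr)
{
	struct kvm_s390_psw psw = {
		.mask = 0x0000000180000000ULL,	/* EA | BA: 64-bit addressing */
		.addr = start_addr,		/* guest start address */
	};

	return ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
}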
/* kvm_arch_vcpu_ioctl_translate() */
	return -EINVAL; /* not implemented yet */

/* kvm_arch_vcpu_ioctl_set_guest_debug() */
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		vcpu->arch.guestdbg.last_bp = 0;
	}
	if (rc)
		vcpu->guest_debug = 0;

/* kvm_arch_vcpu_ioctl_set_mpstate() */
	/* user space knows about this interface - let it control the state */
	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	default:
		rc = -ENXIO;
	}
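/*
 * Example (not part of this file): driving the vCPU state machine above
 * from userspace. The first KVM_SET_MP_STATE call also switches the VM to
 * user-controlled cpu state (kvm_s390_set_user_cpu_state_ctrl()).
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}

static int start_vcpu(int vcpu_fd)
{
	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_OPERATING };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}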
/* __kvm_s390_fixup_fault_sync() */
	struct kvm *kvm = gmap->private;
	/* ... */
	rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);

/*
 * __kvm_s390_mprotect_many() - Apply specified protection to guest pages
 *
 * Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
 *
 * Context: kvm->srcu and gmap->mm need to be held in read mode
 */
	if (rc == -EAGAIN) {
		/* ... */
	}

/* kvm_s390_mprotect_notify_prefix() */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	mmap_read_lock(vcpu->arch.gmap->mm);

	rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT);

	mmap_read_unlock(vcpu->arch.gmap->mm);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

/* kvm_s390_handle_requests() */
	/*
	 * If the guest prefix changed, re-arm the ipte notifier for the
	 * guest prefix page.
	 */
	vcpu->arch.sie_block->ihcpu = 0xffff;
	/* ... */
	trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
	/* ... */
	trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
	/* ... */
	vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* ... */
	vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
	/*
	 * Re-enable CMM virtualization if CMMA is available and
	 * CMM has been used.
	 */
	if ((vcpu->kvm->arch.use_cmma) &&
	    (vcpu->kvm->mm->context.uses_cmm))
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
/* __kvm_s390_set_tod_clock() */
	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

/* kvm_s390_try_set_tod_clock() */
	if (!mutex_trylock(&kvm->lock))
		return 0;
	__kvm_s390_set_tod_clock(kvm, gtod);
	mutex_unlock(&kvm->lock);
	return 1;
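/*
 * Example (not part of this file): a hedged sketch of the epoch arithmetic
 * in __kvm_s390_set_tod_clock() above. The guest TOD is conceptually a
 * 128-bit value (epoch index : tod); the epoch kept in the SIE block is the
 * 64-bit difference guest_tod - host_tod, and when that subtraction wraps,
 * one borrow is taken from the epoch index. Types are illustrative.
 */
struct tod128 {
	unsigned char ei;		/* epoch index (high part) */
	unsigned long long tod;		/* low 64 bits of the TOD clock */
};

static struct tod128 tod_epoch(struct tod128 guest, struct tod128 host)
{
	struct tod128 epoch;

	epoch.tod = guest.tod - host.tod;	/* may wrap around zero */
	epoch.ei = guest.ei - host.ei;
	if (epoch.tod > guest.tod)		/* wrapped: borrow from ei */
		epoch.ei -= 1;
	return epoch;
}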
/* __kvm_inject_pfault_token() */
	WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));

/* kvm_arch_async_page_not_present() */
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

/* kvm_arch_async_page_present() */
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);

/* kvm_arch_setup_async_pf() */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;
	return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);

/* vcpu_pre_run() */
	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
	/* ... */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* deliver pending interrupts before entering SIE */
		/* ... */
	}
	/* ... */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	current->thread.gmap_int_code = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);

/* vcpu_post_run_addressing_exception() */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	/* Instruction-Fetching Exceptions - we can't detect the ilen. */
	pgm_info = vcpu->arch.pgm;

/* kvm_s390_assert_primary_as() */
	KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
		"Unexpected program interrupt 0x%x, TEID 0x%016lx",
		current->thread.gmap_int_code, current->thread.gmap_teid.val);
/*
 * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
 */
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID) {
		/* ... */
	}
	/* ... */
	if (vcpu->arch.gmap->pfault_enabled) {
		/* ... */
	}
	/* ... */
		return -EAGAIN;
	/* ... */
	vcpu->stat.pfault_sync++;
	/* ... */
		return -EFAULT;
	/* ... */
	mmap_read_lock(vcpu->arch.gmap->mm);
	rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked);
	if (!rc)
		rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr);
	scoped_guard(spinlock, &vcpu->kvm->mmu_lock) {
		kvm_release_faultin_page(vcpu->kvm, page, false, writable);
	}
	mmap_read_unlock(vcpu->arch.gmap->mm);
	return rc;

/* vcpu_dat_fault_handler() */
	if (kvm_is_ucontrol(vcpu->kvm)) {
		/*
		 * This translates the per-vCPU guest address into a
		 * fake guest address, so that ucontrol faults can be
		 * handled with the normal fault path.
		 */
		mmap_read_lock(vcpu->arch.gmap->mm);
		gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr);
		mmap_read_unlock(vcpu->arch.gmap->mm);
		if (gaddr_tmp == -EFAULT) {
			vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
			vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
			vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
			return -EREMOTE;
		}
		gaddr = gaddr_tmp;
	}

/* vcpu_post_run_handle_fault() */
	gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
	/* ... */
	switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
	case 0:
		vcpu->stat.exit_null++;
		break;
	/* ... */
		if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) {
			pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
					    current->thread.gmap_int_code, current->comm,
					    current->pid);
			send_sig(SIGSEGV, current, 0);
		}
		break;
	/* ... */
		rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr);
		if (rc == -EINVAL)
			send_sig(SIGSEGV, current, 0);
		if (rc != -ENXIO)
			break;
	/* ... */
	default:
		KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
			current->thread.gmap_int_code, current->thread.gmap_teid.val);
	}
/* vcpu_post_run() */
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	/* ... */
	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		rc = kvm_handle_sie_intercept(vcpu);
		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	}

/* __vcpu_run() */
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	do {
		/* ... */
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		/* ... */
		exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
						      vcpu->run->s.regs.gprs,
						      vcpu->arch.gmap->asce);
		/* ... */
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		/* ... */
		rc = vcpu_post_run(vcpu, exit_reason);
	} while (rc == 0);
/* sync_regs_fmt2() */
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	/* ... */
	if (current->thread.gs_cb) {
		vcpu->arch.host_gscb = current->thread.gs_cb;
		save_gs_cb(vcpu->arch.host_gscb);
	}
	if (vcpu->arch.gs_enabled) {
		current->thread.gs_cb = (struct gs_cb *)
			&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
	}
	/* ... */

/* sync_regs() */
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	vcpu->arch.acrs_loaded = true;
	kvm_s390_fpu_load(vcpu->run);
	/* ... */
	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * For protected guests the ultravisor controls most of the
		 * PSW, so only the condition code is accepted from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
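/*
 * Example (not part of this file): how userspace feeds sync_regs() above
 * through the kvm_run area instead of separate ioctls. Assumes kvm_run was
 * mmap()ed from the vCPU fd; only fields whose KVM_SYNC_* bit is set in
 * kvm_dirty_regs are picked up on the next KVM_RUN.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_with_new_prefix(int vcpu_fd, struct kvm_run *run, __u64 prefix)
{
	run->s.regs.prefix = prefix;		/* new prefix page origin */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* tell sync_regs() to apply it */

	return ioctl(vcpu_fd, KVM_RUN, 0);
}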
/* store_regs_fmt2() */
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	/* ... */
	if (vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);
	current->thread.gs_cb = vcpu->arch.host_gscb;
	restore_gs_cb(vcpu->arch.host_gscb);
	if (!vcpu->arch.host_gscb)
		local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
	vcpu->arch.host_gscb = NULL;
	/* ... */

/* store_regs() */
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.acrs_loaded = false;
	kvm_s390_fpu_store(vcpu->run);
	/* ... */

/* kvm_arch_vcpu_ioctl_run() */
	struct kvm_run *kvm_run = vcpu->run;

	/* A SIE entry while dumping a protected guest could be fatal */
	if (vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (!vcpu->wants_to_run)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;
	/* ... */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}
	/* ... */
	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->stat.signal_exits++;
		rc = -EINTR;
	}
	/* ... */
	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}
	/* ... */
	vcpu->stat.exit_userspace++;
/*
 * store status - stores the cpu state at a given address
 *
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
/* kvm_s390_store_status_unloaded() */
	/* ... */
		return -EFAULT;
	/* ... */
		return -EFAULT;
	/* ... */
	gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (cpu_has_vx()) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	/* ... */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;

/* kvm_s390_vcpu_store_status() */
	/* ... */
	kvm_s390_fpu_store(vcpu->run);
	save_access_regs(vcpu->run->s.regs.acrs);

/* kvm_s390_vcpu_start() */
	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* ... */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
	/* ... */
		spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	/* ... */
	if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
		started_vcpus++;
	/* ... */
	/* we're the only active VCPU -> speed it up */
	__enable_ibs_on_vcpu(vcpu);
	/* ... */
	/*
	 * As we are starting a second VCPU, we have to disable
	 * the IBS facility on all VCPUs to remove potentially
	 * outstanding ENABLE requests.
	 */
	__disable_ibs_on_all_vcpus(vcpu->kvm);
	/* ... */
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/* ... */
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);

/* kvm_s390_vcpu_stop() */
	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* ... */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
	/* ... */
		spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	/* ... */
	struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
	/* ... */
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
/* kvm_vcpu_ioctl_enable_cap() */
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
	}
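/*
 * Example (not part of this file): enabling the CSS-support capability
 * handled above. This is a per-vCPU KVM_ENABLE_CAP, but it flips a VM-wide
 * flag (kvm->arch.css_support), so issuing it on one vCPU is enough.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_css_support(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* cap.flags must be zero */
	cap.cap = KVM_CAP_S390_CSS_SUPPORT;

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}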
/* kvm_s390_vcpu_sida_op() */
	void __user *uaddr = (void __user *)mop->buf;
	void *sida_addr;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, sida_addr, mop->size))
			r = -EFAULT;
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user(sida_addr, uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;

/* kvm_s390_vcpu_mem_op() */
	void __user *uaddr = (void __user *)mop->buf;
	enum gacc_mode acc_mode;
	void *tmpbuf = NULL;
	int r;

	/* ... */
	if (mop->ar >= NUM_ACRS)
		return -EINVAL;
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
				    acc_mode, mop->key);
	} else if (acc_mode == GACC_FETCH) {
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
		if (!r && copy_to_user(uaddr, tmpbuf, mop->size))
			r = -EFAULT;
	} else {
		if (copy_from_user(tmpbuf, uaddr, mop->size))
			r = -EFAULT;
		else
			r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
						 mop->size, mop->key);
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
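/*
 * Example (not part of this file): reading guest memory through the MEM_OP
 * path above. Logical reads honor the access register and, optionally, a
 * storage key; KVM_S390_MEMOP_F_CHECK_ONLY would validate the range without
 * copying any data.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.gaddr = gaddr;			/* guest logical address */
	op.buf = (uint64_t)(uintptr_t)buf;	/* userspace destination */
	op.size = len;
	op.ar = 0;				/* access register 0 */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}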
/* kvm_s390_vcpu_memsida_op() */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_vcpu_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_vcpu_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;

/* kvm_arch_vcpu_unlocked_ioctl() */
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ:
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	case KVM_S390_INTERRUPT:
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	/* ... */
	rc = -ENOIOCTLCMD;
	/* ... */
	/*
	 * To simplify single stepping of userspace-emulated instructions,
	 * clear the exit-pending debug flag again once it has been consumed.
	 */
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;

/* kvm_s390_handle_pv_vcpu_dump() */
	if (!vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
		return -EFAULT;

	/* We only handle this subcmd right now */
	if (dmp.subcmd != KVM_PV_DUMP_CPU)
		return -EINVAL;

	/* CPU dump length is the same as create cpu storage donation. */
	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
		return -EINVAL;

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data to user space */
	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

/* kvm_arch_vcpu_ioctl() */
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW:
		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	/* ... */
	case KVM_S390_UCAS_MAP:
		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}
		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	case KVM_S390_UCAS_UNMAP:
		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}
		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	case KVM_S390_VCPU_FAULT:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = vcpu_dat_fault_handler(vcpu, arg, 0);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	/*
	 * ... (KVM_ENABLE_CAP, KVM_S390_MEM_OP, KVM_S390_{GET,SET}_IRQ_STATE,
	 * KVM_S390_PV_CPU_COMMAND and friends follow the same
	 * copy_from_user()/validate/dispatch pattern, failing with -EFAULT
	 * or -EINVAL as above)
	 */
	default:
		r = -ENOTTY;
	}
	/* ... */
	return r;
/* kvm_arch_vcpu_fault() */
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
	return VM_FAULT_SIGBUS;

/* kvm_arch_prepare_memory_region() */
	if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
		/*
		 * Memory slots have to start and end on a segment (1 MB)
		 * boundary; the backing userspace memory may be fragmented
		 * into multiple vmas.
		 */
		if (new->userspace_addr & 0xffffful)
			return -EINVAL;

		size = new->npages * PAGE_SIZE;
		if (size & 0xffffful)
			return -EINVAL;

		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
			return -EINVAL;
	}

	if (!kvm->arch.migration_mode)
		return 0;

	/*
	 * turn off migration mode when:
	 * - userspace creates a new memslot with dirty logging off,
	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
	 *   dirty logging is turned off.
	 * Migration mode expects dirty page logging being enabled to store
	 * its dirty bitmap.
	 */
	if (change != KVM_MR_DELETE &&
	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		WARN(kvm_s390_vm_stop_migration(kvm),
		     "Failed to stop migration mode");

	return 0;
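/*
 * Example (not part of this file): a memslot that passes the s390 checks in
 * kvm_arch_prepare_memory_region() above. Both the userspace address and
 * the size must be 1 MB (segment) aligned, and the slot must stay below
 * kvm->arch.mem_limit. mmap() only guarantees page alignment, so a real
 * setup would have to ensure 1 MB alignment (e.g. via MAP_HUGETLB or by
 * over-allocating and rounding up); this sketch glosses over that.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int add_1mb_aligned_slot(int vm_fd, uint64_t guest_base, uint64_t size)
{
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = guest_base,			/* segment aligned */
		.memory_size = size,				/* multiple of 1 MB */
		.userspace_addr = (uint64_t)(uintptr_t)mem,	/* segment aligned */
	};

	if (mem == MAP_FAILED)
		return -1;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}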
/* kvm_arch_commit_memory_region() */
	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	/* ... */
	}

/* kvm_s390_init() */
	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}
	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}