1 // SPDX-License-Identifier: GPL-2.0
13 #define KMSG_COMPONENT "kvm-s390"
38 #include <asm/access-regs.h>
39 #include <asm/asm-offsets.h>
54 #include "kvm-s390.h"
60 #include "trace-s390.h"
227 * the feature is opt-in anyway
242 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
283 * -delta to the epoch. in kvm_clock_sync_scb()
285 delta = -delta; in kvm_clock_sync_scb()
287 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
289 delta_idx = -1; in kvm_clock_sync_scb()
291 scb->epoch += delta; in kvm_clock_sync_scb()
292 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
293 scb->epdx += delta_idx; in kvm_clock_sync_scb()
294 if (scb->epoch < delta) in kvm_clock_sync_scb()
295 scb->epdx += 1; in kvm_clock_sync_scb()
315 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
317 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
318 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
320 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
321 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
322 if (vcpu->arch.vsie_block) in kvm_clock_sync()
323 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
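The epoch/epdx update above is a 128-bit addition spread over two 64-bit fields: the (possibly negative) delta is sign-extended into the epoch-index word, and a carry out of the low word is detected with an unsigned compare. A minimal standalone sketch of the same arithmetic (the helper name is hypothetical, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the epoch/epdx update: (hi:lo) += sign_extend_128(delta). */
static void tod_add_signed(uint64_t *hi, uint64_t *lo, int64_t sdelta)
{
        uint64_t delta = (uint64_t)sdelta;
        uint64_t delta_idx = sdelta < 0 ? ~0ULL : 0;    /* sign extension into the high word */

        *lo += delta;
        *hi += delta_idx;
        if (*lo < delta)                /* carry out of the low 64 bits */
                *hi += 1;
}

int main(void)
{
        uint64_t hi = 0, lo = 5;

        tod_add_signed(&hi, &lo, -7);   /* 5 + (-7) wraps below zero */
        printf("%016llx:%016llx\n", (unsigned long long)hi,
               (unsigned long long)lo); /* ffffffffffffffff:fffffffffffffffe, i.e. -2 */
        return 0;
}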
398 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
497 int rc = -ENOMEM; in __kvm_s390_init()
499 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
501 return -ENOMEM; in __kvm_s390_init()
503 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
573 return -EINVAL; in kvm_arch_dev_ioctl()
701 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
705 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
706 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
741 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
743 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
745 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
746 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
756 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
760 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
778 if (cap->flags) in kvm_vm_ioctl_enable_cap()
779 return -EINVAL; in kvm_vm_ioctl_enable_cap()
781 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
784 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
789 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
793 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
794 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
795 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
797 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
798 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
800 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
801 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
804 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
805 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
808 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
809 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
812 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
813 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
816 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
817 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
820 set_kvm_facility(kvm->arch.model.fac_mask, 198); in kvm_vm_ioctl_enable_cap()
821 set_kvm_facility(kvm->arch.model.fac_list, 198); in kvm_vm_ioctl_enable_cap()
824 set_kvm_facility(kvm->arch.model.fac_mask, 199); in kvm_vm_ioctl_enable_cap()
825 set_kvm_facility(kvm->arch.model.fac_list, 199); in kvm_vm_ioctl_enable_cap()
829 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
830 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
835 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
836 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
837 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
838 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
840 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
841 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
844 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
849 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
850 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
851 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
853 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
854 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
857 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
862 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
863 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
864 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
865 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
867 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
868 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
871 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
876 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
877 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
878 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
879 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
880 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
883 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
884 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
885 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
891 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
892 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
894 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
900 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
905 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
910 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
911 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
912 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
913 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
915 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
916 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
919 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
924 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
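The capability handler above is reached from userspace through the generic KVM_ENABLE_CAP ioctl issued on the VM file descriptor. A minimal sketch, assuming a host with /dev/kvm and the standard <linux/kvm.h> uapi header; error handling for open() and KVM_CREATE_VM is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);
        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_S390_USER_SIGP, /* handled in kvm_vm_ioctl_enable_cap() */
        };

        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
                perror("KVM_ENABLE_CAP");
        return 0;
}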
934 switch (attr->attr) { in kvm_s390_get_mem_control()
938 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
939 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
940 ret = -EFAULT; in kvm_s390_get_mem_control()
943 ret = -ENXIO; in kvm_s390_get_mem_control()
953 switch (attr->attr) { in kvm_s390_set_mem_control()
955 ret = -ENXIO; in kvm_s390_set_mem_control()
960 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
961 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
962 ret = -EBUSY; in kvm_s390_set_mem_control()
963 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
964 ret = -EINVAL; in kvm_s390_set_mem_control()
966 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
968 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
971 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
974 ret = -ENXIO; in kvm_s390_set_mem_control()
977 ret = -EINVAL; in kvm_s390_set_mem_control()
978 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
982 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
983 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
984 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
985 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
986 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
993 return -EINVAL; in kvm_s390_set_mem_control()
995 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
996 return -EFAULT; in kvm_s390_set_mem_control()
998 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
999 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
1000 return -E2BIG; in kvm_s390_set_mem_control()
1003 return -EINVAL; in kvm_s390_set_mem_control()
1007 new_limit -= 1; in kvm_s390_set_mem_control()
1009 ret = -EBUSY; in kvm_s390_set_mem_control()
1010 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
1011 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
1013 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
1016 ret = -ENOMEM; in kvm_s390_set_mem_control()
1018 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
1019 new->private = kvm; in kvm_s390_set_mem_control()
1020 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
1024 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
1027 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
1031 ret = -ENXIO; in kvm_s390_set_mem_control()
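Both branches above are VM device attributes in the KVM_S390_VM_MEM_CTRL group, selected from userspace with KVM_SET_DEVICE_ATTR on the VM fd; as the created_vcpus checks show, they must run before the first vCPU exists. A sketch, assuming an s390 build where <linux/kvm.h> pulls in the s390 asm/kvm.h definitions and vm_fd was obtained as in the earlier sketch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable CMMA and cap guest memory at 4 GiB; vm_fd is an open VM descriptor. */
static int setup_mem_ctrl(int vm_fd)
{
        uint64_t limit = 4ULL << 30;
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_MEM_CTRL,
                .attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
        };

        if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
                return -1;

        attr.attr = KVM_S390_VM_MEM_LIMIT_SIZE;
        attr.addr = (uint64_t)(uintptr_t)&limit;  /* kernel reads the limit with get_user() */
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}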
1057 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1058 switch (attr->attr) { in kvm_s390_vm_set_crypto()
1061 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1062 return -EINVAL; in kvm_s390_vm_set_crypto()
1065 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1066 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1067 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1072 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1073 return -EINVAL; in kvm_s390_vm_set_crypto()
1076 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1077 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1078 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1083 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1084 return -EINVAL; in kvm_s390_vm_set_crypto()
1086 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1087 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1088 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1093 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1094 return -EINVAL; in kvm_s390_vm_set_crypto()
1096 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1097 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1098 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1103 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1104 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1106 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1110 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1111 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1113 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1116 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1117 return -ENXIO; in kvm_s390_vm_set_crypto()
1121 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
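Userspace drives this function through the KVM_S390_VM_CRYPTO attribute group; enabling AES key wrapping, for instance, makes the handler above generate a fresh wrapping key mask. A sketch under the same uapi assumptions as before:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Turn on AES key wrapping for the guest; vm_fd is an open VM descriptor. */
static int enable_aes_kw(int vm_fd)
{
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_CRYPTO,
                .attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
        };

        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}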
1128 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1131 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI; in kvm_s390_vcpu_pci_setup()
1132 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI; in kvm_s390_vcpu_pci_setup()
1140 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1149 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1171 * Must be called with kvm->srcu held to avoid races on memslots, and with
1172 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1182 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1186 return -EINVAL; in kvm_s390_vm_start_migration()
1188 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1189 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1194 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1195 return -EINVAL; in kvm_s390_vm_start_migration()
1203 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1205 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1206 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1212 * Must be called with kvm->slots_lock to avoid races with ourselves and
1218 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1220 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1221 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1229 int res = -ENXIO; in kvm_s390_vm_set_migration()
1231 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1232 switch (attr->attr) { in kvm_s390_vm_set_migration()
1242 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1250 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1252 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1253 return -ENXIO; in kvm_s390_vm_get_migration()
1255 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1256 return -EFAULT; in kvm_s390_vm_get_migration()
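The migration mode toggled above is likewise a VM attribute group: starting migration arms CMMA dirty tracking, and the status attribute reads the flag back. A sketch under the same assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Start migration mode, then read the flag back; vm_fd is an open VM descriptor. */
static int start_and_check_migration(int vm_fd)
{
        uint64_t status = 0;
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_MIGRATION,
                .attr  = KVM_S390_VM_MIGRATION_START,
        };

        if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
                return -1;

        attr.attr = KVM_S390_VM_MIGRATION_STATUS;
        attr.addr = (uint64_t)(uintptr_t)&status;
        return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}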
1266 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1267 return -EFAULT; in kvm_s390_set_tod_ext()
1270 return -EINVAL; in kvm_s390_set_tod_ext()
1283 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1285 return -EFAULT; in kvm_s390_set_tod_high()
1288 return -EINVAL; in kvm_s390_set_tod_high()
1298 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1300 return -EFAULT; in kvm_s390_set_tod_low()
1311 if (attr->flags) in kvm_s390_set_tod()
1312 return -EINVAL; in kvm_s390_set_tod()
1314 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1320 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1324 switch (attr->attr) { in kvm_s390_set_tod()
1335 ret = -ENXIO; in kvm_s390_set_tod()
1340 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1353 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1354 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1356 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1357 if (gtod->tod < clk.tod) in kvm_s390_get_tod_clock()
1358 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1370 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1371 return -EFAULT; in kvm_s390_get_tod_ext()
1382 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1384 return -EFAULT; in kvm_s390_get_tod_high()
1395 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1396 return -EFAULT; in kvm_s390_get_tod_low()
1406 if (attr->flags) in kvm_s390_get_tod()
1407 return -EINVAL; in kvm_s390_get_tod()
1409 switch (attr->attr) { in kvm_s390_get_tod()
1420 ret = -ENXIO; in kvm_s390_get_tod()
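The three setters above correspond to the TOD_LOW, TOD_HIGH and TOD_EXT attributes of the KVM_S390_VM_TOD group; the extended form carries both the epoch index and the 64-bit TOD value in struct kvm_s390_vm_tod_clock. A sketch under the same assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Program the guest TOD clock via the extended attribute. */
static int set_guest_tod(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
        struct kvm_s390_vm_tod_clock gtod = {
                .epoch_idx = epoch_idx,
                .tod = tod,
        };
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_TOD,
                .attr  = KVM_S390_VM_TOD_EXT,
                .addr  = (uint64_t)(uintptr_t)&gtod,
        };

        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}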
1432 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1433 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1434 ret = -EBUSY; in kvm_s390_set_processor()
1439 ret = -ENOMEM; in kvm_s390_set_processor()
1442 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1444 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1447 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1448 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1449 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1450 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1451 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1453 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1455 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1458 kvm->arch.model.ibc, in kvm_s390_set_processor()
1459 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1461 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1462 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1463 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1465 ret = -EFAULT; in kvm_s390_set_processor()
1468 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1477 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1478 return -EFAULT; in kvm_s390_set_processor_feat()
1482 return -EINVAL; in kvm_s390_set_processor_feat()
1484 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1485 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1486 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1487 return -EBUSY; in kvm_s390_set_processor_feat()
1489 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1490 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1501 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1502 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1503 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1504 return -EBUSY; in kvm_s390_set_processor_subfunc()
1507 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1509 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1510 return -EFAULT; in kvm_s390_set_processor_subfunc()
1512 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1516 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1521 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1530 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1536 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1539 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1542 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1545 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1548 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1551 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1554 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1557 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1560 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1563 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1569 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1589 struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; in kvm_s390_set_uv_feat()
1593 if (get_user(data, &ptr->feat)) in kvm_s390_set_uv_feat()
1594 return -EFAULT; in kvm_s390_set_uv_feat()
1596 return -EINVAL; in kvm_s390_set_uv_feat()
1598 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1599 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1600 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1601 return -EBUSY; in kvm_s390_set_uv_feat()
1603 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1604 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1606 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1613 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1615 switch (attr->attr) { in kvm_s390_set_cpu_model()
1639 ret = -ENOMEM; in kvm_s390_get_processor()
1642 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1643 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1644 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1647 kvm->arch.model.ibc, in kvm_s390_get_processor()
1648 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1650 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1651 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1652 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1653 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1654 ret = -EFAULT; in kvm_s390_get_processor()
1667 ret = -ENOMEM; in kvm_s390_get_machine()
1670 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1671 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1672 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1674 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list, in kvm_s390_get_machine()
1677 kvm->arch.model.ibc, in kvm_s390_get_machine()
1678 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1680 mach->fac_mask[0], in kvm_s390_get_machine()
1681 mach->fac_mask[1], in kvm_s390_get_machine()
1682 mach->fac_mask[2]); in kvm_s390_get_machine()
1684 mach->fac_list[0], in kvm_s390_get_machine()
1685 mach->fac_list[1], in kvm_s390_get_machine()
1686 mach->fac_list[2]); in kvm_s390_get_machine()
1687 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1688 ret = -EFAULT; in kvm_s390_get_machine()
1699 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1700 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1701 return -EFAULT; in kvm_s390_get_processor_feat()
1715 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1716 return -EFAULT; in kvm_s390_get_machine_feat()
1727 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1729 return -EFAULT; in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1733 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1735 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1738 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1741 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1744 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1746 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1747 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1750 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1752 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1753 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1755 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1756 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1758 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1759 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1761 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1762 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1764 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1765 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1767 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1768 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1770 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1771 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1773 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1774 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1776 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1777 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1779 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1780 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1781 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1782 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1784 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1785 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1786 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1787 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1798 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1800 return -EFAULT; in kvm_s390_get_machine_subfunc()
1868 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_processor_uv_feat()
1869 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1871 if (put_user(feat, &dst->feat)) in kvm_s390_get_processor_uv_feat()
1872 return -EFAULT; in kvm_s390_get_processor_uv_feat()
1873 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1880 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_machine_uv_feat()
1886 if (put_user(feat, &dst->feat)) in kvm_s390_get_machine_uv_feat()
1887 return -EFAULT; in kvm_s390_get_machine_uv_feat()
1888 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1895 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1897 switch (attr->attr) { in kvm_s390_get_cpu_model()
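All of the getters and setters in this block hang off the KVM_S390_VM_CPU_MODEL attribute group; the processor attribute exchanges cpuid, IBC and the facility list through struct kvm_s390_vm_cpu_processor, and setting it is only permitted before the first vCPU is created. A sketch under the same assumptions:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the current guest CPU model; vm_fd is an open VM descriptor. */
static int get_cpu_model(int vm_fd, struct kvm_s390_vm_cpu_processor *proc)
{
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_CPU_MODEL,
                .attr  = KVM_S390_VM_CPU_PROCESSOR,
                .addr  = (unsigned long)proc,
        };

        return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}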
1927 * kvm_s390_update_topology_change_report - update CPU topology change report
1931 * Updates the Multiprocessor Topology-Change-Report bit to signal
1942 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1943 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1944 old = READ_ONCE(sca->utility); in kvm_s390_update_topology_change_report()
1948 } while (!try_cmpxchg(&sca->utility.val, &old.val, new.val)); in kvm_s390_update_topology_change_report()
1949 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1956 return -ENXIO; in kvm_s390_set_topo_change_indication()
1958 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1968 return -ENXIO; in kvm_s390_get_topo_change_indication()
1970 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1971 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1972 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1974 return put_user(topo, (u8 __user *)attr->addr); in kvm_s390_get_topo_change_indication()
1981 switch (attr->group) { in kvm_s390_vm_set_attr()
2001 ret = -ENXIO; in kvm_s390_vm_set_attr()
2012 switch (attr->group) { in kvm_s390_vm_get_attr()
2029 ret = -ENXIO; in kvm_s390_vm_get_attr()
2040 switch (attr->group) { in kvm_s390_vm_has_attr()
2042 switch (attr->attr) { in kvm_s390_vm_has_attr()
2045 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2051 ret = -ENXIO; in kvm_s390_vm_has_attr()
2056 switch (attr->attr) { in kvm_s390_vm_has_attr()
2062 ret = -ENXIO; in kvm_s390_vm_has_attr()
2067 switch (attr->attr) { in kvm_s390_vm_has_attr()
2079 ret = -ENXIO; in kvm_s390_vm_has_attr()
2084 switch (attr->attr) { in kvm_s390_vm_has_attr()
2093 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2096 ret = -ENXIO; in kvm_s390_vm_has_attr()
2104 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2107 ret = -ENXIO; in kvm_s390_vm_has_attr()
2120 if (args->flags != 0) in kvm_s390_get_skeys()
2121 return -EINVAL; in kvm_s390_get_skeys()
2124 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
2128 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
2129 return -EINVAL; in kvm_s390_get_skeys()
2131 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_get_skeys()
2133 return -ENOMEM; in kvm_s390_get_skeys()
2135 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
2136 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2137 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
2138 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2140 r = -EFAULT; in kvm_s390_get_skeys()
2144 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
2148 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2149 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
2152 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
2153 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
2155 r = -EFAULT; in kvm_s390_get_skeys()
2169 if (args->flags != 0) in kvm_s390_set_skeys()
2170 return -EINVAL; in kvm_s390_set_skeys()
2173 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
2174 return -EINVAL; in kvm_s390_set_skeys()
2176 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_set_skeys()
2178 return -ENOMEM; in kvm_s390_set_skeys()
2180 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
2181 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
2183 r = -EFAULT; in kvm_s390_set_skeys()
2193 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
2194 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2195 while (i < args->count) { in kvm_s390_set_skeys()
2197 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2199 r = -EFAULT; in kvm_s390_set_skeys()
2205 r = -EINVAL; in kvm_s390_set_skeys()
2209 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
2211 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
2219 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2220 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
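The two helpers above back the KVM_S390_GET_SKEYS and KVM_S390_SET_SKEYS VM ioctls, which move up to KVM_S390_SKEYS_MAX storage keys per call through a user buffer. A sketch under the same assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch the storage keys of the first `count` guest pages into keys[]. */
static int get_skeys(int vm_fd, uint8_t *keys, uint64_t count)
{
        struct kvm_s390_skeys args = {
                .start_gfn = 0,
                .count = count,
                .skeydata_addr = (uint64_t)(uintptr_t)keys,
        };

        return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}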
2238 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
2240 args->count = 0; in kvm_s390_peek_cmma()
2241 while (args->count < bufsize) { in kvm_s390_peek_cmma()
2248 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
2249 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2251 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
2268 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2269 struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; in kvm_s390_next_dirty_cmma()
2271 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2275 mnode = rb_first(&slots->gfn_tree); in kvm_s390_next_dirty_cmma()
2277 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2281 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2284 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2285 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { in kvm_s390_next_dirty_cmma()
2286 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2287 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); in kvm_s390_next_dirty_cmma()
2289 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2302 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2304 args->count = 0; in kvm_s390_get_cmma()
2305 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2311 while (args->count < bufsize) { in kvm_s390_get_cmma()
2316 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2317 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2318 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2321 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2330 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2334 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2358 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2359 return -ENXIO; in kvm_s390_get_cmma_bits()
2361 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2362 return -EINVAL; in kvm_s390_get_cmma_bits()
2364 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2365 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2366 return -EINVAL; in kvm_s390_get_cmma_bits()
2368 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2369 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2374 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2381 return -ENOMEM; in kvm_s390_get_cmma_bits()
2383 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2384 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2389 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2390 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2392 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2393 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2395 args->remaining = 0; in kvm_s390_get_cmma_bits()
2397 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2398 ret = -EFAULT; in kvm_s390_get_cmma_bits()
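From userspace the dirty-CMMA walk above is consumed with the KVM_S390_GET_CMMA_BITS ioctl: the kernel fills a value buffer, advances start_gfn, and reports how many dirty pages remain. With flags of 0 the VM must be in migration mode, as the checks above show. A sketch under the same assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pull one chunk of CMMA values during migration; vm_fd is an open VM descriptor. */
static long get_cmma_chunk(int vm_fd, uint8_t *buf, uint32_t buflen, uint64_t *gfn)
{
        struct kvm_s390_cmma_log log = {
                .start_gfn = *gfn,
                .count = buflen,
                .flags = 0,                       /* or KVM_S390_CMMA_PEEK to not clear bits */
                .values = (uint64_t)(uintptr_t)buf,
        };

        if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
                return -1;
        *gfn = log.start_gfn + log.count;         /* continue after the returned range */
        return log.remaining;                     /* dirty pages still to be read */
}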
2407 * set and the mm->context.uses_cmm flag is set.
2416 mask = args->mask; in kvm_s390_set_cmma_bits()
2418 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2419 return -ENXIO; in kvm_s390_set_cmma_bits()
2421 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2422 return -EINVAL; in kvm_s390_set_cmma_bits()
2424 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2425 return -EINVAL; in kvm_s390_set_cmma_bits()
2427 if (args->count == 0) in kvm_s390_set_cmma_bits()
2430 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2432 return -ENOMEM; in kvm_s390_set_cmma_bits()
2434 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2436 r = -EFAULT; in kvm_s390_set_cmma_bits()
2440 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2441 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2442 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2443 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2445 r = -EFAULT; in kvm_s390_set_cmma_bits()
2452 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2454 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2455 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2457 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2458 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2459 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2460 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2468 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2478 * Return: 0 in case of success, otherwise -EIO
2496 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2500 ret = -EIO; in kvm_s390_cpus_from_pv()
2502 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2504 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */ in kvm_s390_cpus_from_pv()
2511 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2519 * Return: 0 in case of success, otherwise -EIO
2534 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2536 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2548 * feature specific data.
2557 switch (info->header.id) { in kvm_s390_handle_pv_info()
2559 len_min = sizeof(info->header) + sizeof(info->vm); in kvm_s390_handle_pv_info()
2561 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2562 return -EINVAL; in kvm_s390_handle_pv_info()
2564 memcpy(info->vm.inst_calls_list, in kvm_s390_handle_pv_info()
2569 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; in kvm_s390_handle_pv_info()
2570 info->vm.max_guests = uv_info.max_num_sec_conf; in kvm_s390_handle_pv_info()
2571 info->vm.max_guest_addr = uv_info.max_sec_stor_addr; in kvm_s390_handle_pv_info()
2572 info->vm.feature_indication = uv_info.uv_feature_indications; in kvm_s390_handle_pv_info()
2577 len_min = sizeof(info->header) + sizeof(info->dump); in kvm_s390_handle_pv_info()
2579 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2580 return -EINVAL; in kvm_s390_handle_pv_info()
2582 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; in kvm_s390_handle_pv_info()
2583 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; in kvm_s390_handle_pv_info()
2584 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; in kvm_s390_handle_pv_info()
2588 return -EINVAL; in kvm_s390_handle_pv_info()
2595 int r = -EINVAL; in kvm_s390_pv_dmp()
2600 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2610 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2612 cmd->rc, cmd->rrc); in kvm_s390_pv_dmp()
2614 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2617 r = -EINVAL; in kvm_s390_pv_dmp()
2622 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2631 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2635 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2638 r = -EINVAL; in kvm_s390_pv_dmp()
2643 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2647 r = -ENOTTY; in kvm_s390_pv_dmp()
2656 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); in kvm_s390_handle_pv()
2657 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2662 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2664 switch (cmd->cmd) { in kvm_s390_handle_pv()
2666 r = -EINVAL; in kvm_s390_handle_pv()
2678 mmap_write_lock(kvm->mm); in kvm_s390_handle_pv()
2680 mmap_write_unlock(kvm->mm); in kvm_s390_handle_pv()
2684 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2688 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2693 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2697 r = -EINVAL; in kvm_s390_handle_pv()
2701 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2709 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2712 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2715 r = -EINVAL; in kvm_s390_handle_pv()
2718 /* kvm->lock must not be held; this is asserted inside the function. */ in kvm_s390_handle_pv()
2719 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2722 r = -EINVAL; in kvm_s390_handle_pv()
2726 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2734 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2737 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2744 r = -EINVAL; in kvm_s390_handle_pv()
2748 r = -EFAULT; in kvm_s390_handle_pv()
2753 r = -EINVAL; in kvm_s390_handle_pv()
2757 r = -ENOMEM; in kvm_s390_handle_pv()
2762 r = -EFAULT; in kvm_s390_handle_pv()
2766 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2774 r = -EINVAL; in kvm_s390_handle_pv()
2775 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2778 r = -EFAULT; in kvm_s390_handle_pv()
2783 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2787 r = -EINVAL; in kvm_s390_handle_pv()
2792 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2793 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2794 cmd->rrc); in kvm_s390_handle_pv()
2798 r = -EINVAL; in kvm_s390_handle_pv()
2803 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2805 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2809 r = -EINVAL; in kvm_s390_handle_pv()
2814 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2816 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2832 r = -EFAULT; in kvm_s390_handle_pv()
2836 r = -EINVAL; in kvm_s390_handle_pv()
2852 r = -EFAULT; in kvm_s390_handle_pv()
2862 r = -EINVAL; in kvm_s390_handle_pv()
2866 r = -EFAULT; in kvm_s390_handle_pv()
2875 r = -EFAULT; in kvm_s390_handle_pv()
2882 r = -ENOTTY; in kvm_s390_handle_pv()
2885 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
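The whole kvm_s390_handle_pv() dispatcher sits behind the KVM_S390_PV_COMMAND VM ioctl; struct kvm_pv_cmd carries the sub-command plus the rc/rrc codes returned by the Ultravisor. A sketch under the same assumptions:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to convert the VM to protected mode; vm_fd is an open VM descriptor. */
static int pv_enable(int vm_fd)
{
        struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
        int r = ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);

        if (r)
                fprintf(stderr, "KVM_PV_ENABLE failed: rc 0x%x rrc 0x%x\n",
                        cmd.rc, cmd.rrc);
        return r;
}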
2892 if (mop->flags & ~supported_flags || !mop->size) in mem_op_validate_common()
2893 return -EINVAL; in mem_op_validate_common()
2894 if (mop->size > MEM_OP_MAX_SIZE) in mem_op_validate_common()
2895 return -E2BIG; in mem_op_validate_common()
2896 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { in mem_op_validate_common()
2897 if (mop->key > 0xf) in mem_op_validate_common()
2898 return -EINVAL; in mem_op_validate_common()
2900 mop->key = 0; in mem_op_validate_common()
2907 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_abs()
2917 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vm_mem_op_abs()
2918 tmpbuf = vmalloc(mop->size); in kvm_s390_vm_mem_op_abs()
2920 return -ENOMEM; in kvm_s390_vm_mem_op_abs()
2923 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2925 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2930 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vm_mem_op_abs()
2931 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vm_mem_op_abs()
2932 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2936 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2937 mop->size, GACC_FETCH, mop->key); in kvm_s390_vm_mem_op_abs()
2940 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_vm_mem_op_abs()
2941 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2943 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vm_mem_op_abs()
2944 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2947 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2948 mop->size, GACC_STORE, mop->key); in kvm_s390_vm_mem_op_abs()
2952 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2960 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_cmpxchg()
2961 void __user *old_addr = (void __user *)mop->old_addr; in kvm_s390_vm_mem_op_cmpxchg()
2966 unsigned int off_in_quad = sizeof(new) - mop->size; in kvm_s390_vm_mem_op_cmpxchg()
2978 if (mop->size > sizeof(new)) in kvm_s390_vm_mem_op_cmpxchg()
2979 return -EINVAL; in kvm_s390_vm_mem_op_cmpxchg()
2980 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2981 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2982 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2983 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2985 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2987 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2992 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2993 new.quad, mop->key, &success); in kvm_s390_vm_mem_op_cmpxchg()
2994 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2995 r = -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2998 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
3005 * This is technically a heuristic only, if the kvm->lock is not in kvm_s390_vm_mem_op()
3006 * taken, it is not guaranteed that the vm is/remains non-protected. in kvm_s390_vm_mem_op()
3008 * on the access, -EFAULT is returned and the vm may crash the in kvm_s390_vm_mem_op()
3010 * There is no sane usecase to do switching and a memop on two in kvm_s390_vm_mem_op()
3014 return -EINVAL; in kvm_s390_vm_mem_op()
3016 switch (mop->op) { in kvm_s390_vm_mem_op()
3023 return -EINVAL; in kvm_s390_vm_mem_op()
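kvm_s390_vm_mem_op() and its helpers implement the VM-scope KVM_S390_MEM_OP ioctl, which reads or writes guest absolute memory (or only checks access with a storage key) without going through a vCPU. A sketch under the same assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read `len` bytes of guest absolute memory at gaddr into buf. */
static int read_guest_abs(int vm_fd, uint64_t gaddr, void *buf, uint32_t len)
{
        struct kvm_s390_mem_op op = {
                .gaddr = gaddr,
                .size  = len,
                .op    = KVM_S390_MEMOP_ABSOLUTE_READ,
                .buf   = (uint64_t)(uintptr_t)buf,
        };

        return ioctl(vm_fd, KVM_S390_MEM_OP, &op);
}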
3029 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
3038 r = -EFAULT; in kvm_arch_vm_ioctl()
3045 r = -EINVAL; in kvm_arch_vm_ioctl()
3046 if (kvm->arch.use_irqchip) in kvm_arch_vm_ioctl()
3051 r = -EFAULT; in kvm_arch_vm_ioctl()
3058 r = -EFAULT; in kvm_arch_vm_ioctl()
3065 r = -EFAULT; in kvm_arch_vm_ioctl()
3074 r = -EFAULT; in kvm_arch_vm_ioctl()
3084 r = -EFAULT; in kvm_arch_vm_ioctl()
3094 r = -EFAULT; in kvm_arch_vm_ioctl()
3097 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3099 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3103 r = -EFAULT; in kvm_arch_vm_ioctl()
3110 r = -EFAULT; in kvm_arch_vm_ioctl()
3113 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3115 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3125 r = -EINVAL; in kvm_arch_vm_ioctl()
3129 r = -EFAULT; in kvm_arch_vm_ioctl()
3133 r = -EINVAL; in kvm_arch_vm_ioctl()
3136 /* must be called without kvm->lock */ in kvm_arch_vm_ioctl()
3139 r = -EFAULT; in kvm_arch_vm_ioctl()
3150 r = -EFAULT; in kvm_arch_vm_ioctl()
3156 r = -EINVAL; in kvm_arch_vm_ioctl()
3160 r = -EFAULT; in kvm_arch_vm_ioctl()
3167 r = -ENOTTY; in kvm_arch_vm_ioctl()
3195 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb); in kvm_s390_set_crycb_format()
3197 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
3198 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3205 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3207 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3222 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3228 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3232 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3234 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
3237 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
3240 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
3246 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
3247 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
3248 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
3272 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3279 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3280 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3281 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3282 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3302 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3304 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3310 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3311 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3312 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3313 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3314 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3315 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3320 if (kvm->arch.use_esca) in sca_dispose()
3321 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3323 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3324 kvm->arch.sca = NULL; in sca_dispose()
3342 rc = -EINVAL; in kvm_arch_init_vm()
3357 rc = -ENOMEM; in kvm_arch_init_vm()
3361 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3363 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3364 if (!kvm->arch.sca) in kvm_arch_init_vm()
3370 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3371 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3374 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
3376 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3377 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3381 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3383 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3386 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3387 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3390 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3393 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3396 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3398 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
3399 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3400 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3402 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3403 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3405 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3406 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3410 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3412 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3413 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3415 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3420 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3423 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3426 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3427 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3429 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3430 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3431 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3433 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3445 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3446 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3447 /* one flat fake memslot covering the whole address-space */ in kvm_arch_init_vm()
3448 mutex_lock(&kvm->slots_lock); in kvm_arch_init_vm()
3450 mutex_unlock(&kvm->slots_lock); in kvm_arch_init_vm()
3453 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3455 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3457 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3458 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3460 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3461 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3464 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3465 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3466 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3470 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3471 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3472 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3476 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3477 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3488 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
3491 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3493 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3495 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3496 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
3498 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3503 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
3514 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
3525 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3526 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3528 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3529 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3531 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3541 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
3542 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
3543 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
3544 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3553 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3554 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3555 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3557 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
3558 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3560 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3562 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
3563 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3565 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3571 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3574 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3575 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3578 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3579 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3580 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3583 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3584 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3585 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; in sca_add_vcpu()
3586 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
3587 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
3589 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3592 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3593 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3594 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3595 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
3597 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3603 d->sda = s->sda; in sca_copy_entry()
3604 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
3605 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
3612 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
3613 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
3615 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
3620 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3627 if (kvm->arch.use_esca) in sca_switch_to_extended()
3632 return -ENOMEM; in sca_switch_to_extended()
3639 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3644 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
3645 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
3646 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
3648 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3649 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3651 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3656 VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)", in sca_switch_to_extended()
3657 old_sca, kvm->arch.sca); in sca_switch_to_extended()
3675 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
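/*
 * Background for the line above: a basic SCA (bsca_block) only has slots
 * for 64 VCPUs, while the extended SCA (esca_block) covers up to 248.
 * sca_switch_to_extended() allocates a fresh ESCA, copies the ipte
 * control, the MCN bits and the per-CPU entries over while holding
 * sca_lock for writing, repoints every sie_block (scaoh/scaol plus
 * ECB2_ESCA) at the new origin, and only then frees the old block, so
 * creating a VCPU beyond the BSCA range transparently upgrades the VM.
 */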
3683 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
3684 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3685 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
3686 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3692 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
3693 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3694 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
3695 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
3696 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3702 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
3703 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3710 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3712 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3729 /* set the cpu timer - may only be called from the VCPU thread itself */
3733 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3734 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3735 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3736 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3737 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3741 /* update and get the cpu timer - can also be called from other VCPU threads */
3747 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3748 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3752 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3757 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3758 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3760 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3761 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3762 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
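/*
 * The retry loop above is the standard seqcount read pattern: the VCPU
 * thread (the only writer) bumps cputm_seqcount to an odd value before
 * updating cputm/cputm_start and back to an even value afterwards, so
 * readers on other CPUs restart whenever the count changed under them.
 * Retrying against "seq & ~1" additionally forces a restart if an odd
 * (update in progress) count was sampled.  A minimal generic sketch of
 * the read side, assuming a seqcount_t protecting one shared 64-bit
 * value (names below are illustrative, not part of kvm-s390):
 */
#include <linux/seqlock.h>

static u64 read_shared_u64(seqcount_t *sc, const u64 *shared)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(sc);	/* waits for an even count */
		val = READ_ONCE(*shared);
	} while (read_seqcount_retry(sc, seq));	/* restart if a writer intervened */

	return val;
}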
3771 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3773 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3778 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3779 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3787 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3789 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3790 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3792 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3793 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3794 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3797 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3798 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3803 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3833 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3836 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3837 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3838 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3839 vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC); in kvm_s390_vcpu_crypto_setup()
3841 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3842 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3845 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3846 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3848 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3849 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3850 if (kvm_has_pckmo_hmac(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3851 vcpu->arch.sie_block->ecd |= ECD_HMAC; in kvm_s390_vcpu_crypto_setup()
3854 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3855 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3860 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); in kvm_s390_vcpu_unsetup_cmma()
3861 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3869 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3871 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); in kvm_s390_vcpu_setup_cmma()
3877 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3879 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3880 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3881 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); in kvm_s390_vcpu_setup_model()
3889 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3893 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3895 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3902 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3903 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3904 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3905 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3906 vcpu->arch.sie_block->ecb |= ECB_PTF; in kvm_s390_vcpu_setup()
3907 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3908 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3909 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3910 vcpu->arch.sie_block->ecb |= ECB_SPECI; in kvm_s390_vcpu_setup()
3912 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3913 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3914 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3915 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3916 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3918 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3920 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3922 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3924 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3925 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3926 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3927 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3929 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3930 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3931 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3932 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3933 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3934 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3935 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3936 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3938 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; in kvm_s390_vcpu_setup()
3939 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); in kvm_s390_vcpu_setup()
3944 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3946 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3951 hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC, in kvm_s390_vcpu_setup()
3954 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3960 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3961 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3966 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3974 return -EINVAL; in kvm_arch_vcpu_precreate()
3986 return -ENOMEM; in kvm_arch_vcpu_create()
3988 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3989 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); in kvm_arch_vcpu_create()
3992 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3993 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3995 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3996 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3997 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3998 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
4000 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
4002 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
4009 vcpu->arch.acrs_loaded = false; in kvm_arch_vcpu_create()
4011 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
4012 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
4013 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
4014 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
4015 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
4016 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
4017 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
4018 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
4023 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
4025 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
4027 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
4033 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p", in kvm_arch_vcpu_create()
4034 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
4035 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
4041 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
4045 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
4046 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
4048 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
4054 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4060 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
4065 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
4071 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
4076 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
4082 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
4088 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
4099 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
4113 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
4128 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
4129 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
4139 if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >= in kvm_arch_no_poll()
4141 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
4157 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
4159 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
4161 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
4162 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4165 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
4166 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4170 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4173 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
4174 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4177 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
4178 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4181 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
4182 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4185 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
4186 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4189 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
4190 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4193 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
4194 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4206 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
4209 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
4211 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
4212 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4215 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
4216 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4219 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4224 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
4225 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4228 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
4229 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4230 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
4234 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
4235 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4238 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
4239 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4242 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
4243 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4246 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
4247 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
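/*
 * The two handlers above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * vcpu ioctls for s390-specific registers (TOD programmable register,
 * epoch difference, CPU timer, clock comparator, pfault state, ...).
 * A minimal userspace sketch, assuming a vcpu fd obtained from
 * KVM_CREATE_VCPU (error handling omitted):
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_guest_cpu_timer(int vcpu_fd, uint64_t *cputm)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)cputm,
	};

	/* Routed to kvm_arch_vcpu_ioctl_get_one_reg() on s390. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}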
4258 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
4259 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
4260 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
4263 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4277 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4278 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4281 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4282 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
4283 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4284 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4287 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
4288 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4289 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4290 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4291 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4292 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4293 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4294 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4295 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4296 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4297 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4298 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4305 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4306 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4307 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
4308 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4314 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
4319 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
4320 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4321 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4322 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
4324 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4325 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4331 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
4339 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
4349 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
4350 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
4361 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
4362 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
4374 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
4376 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
4377 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
4379 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
4390 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
4391 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
4393 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
4394 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
4405 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
4407 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
4408 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
4416 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
4430 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4433 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4434 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4438 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4442 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4443 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
4447 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
4451 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4455 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4487 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
4488 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4490 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
4499 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4507 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
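/*
 * kvm_arch_vcpu_ioctl_set_mpstate() is reached through the generic
 * KVM_SET_MP_STATE vcpu ioctl and, as the comment above notes, switches
 * the VM to user-controlled CPU state handling before starting or
 * stopping the VCPU.  A minimal userspace sketch (error handling
 * omitted):
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}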
4521 struct kvm *kvm = gmap->private; in __kvm_s390_fixup_fault_sync()
4534 rc = fixup_user_fault(gmap->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); in __kvm_s390_fixup_fault_sync()
4541 * __kvm_s390_mprotect_many() - Apply specified protection to guest pages
4548 * Returns: 0 in case of success, < 0 in case of error - see gmap_protect_one()
4550 * Context: kvm->srcu and gmap->mm need to be held in read mode
4561 if (rc == -EAGAIN) { in __kvm_s390_mprotect_many()
4577 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_mprotect_notify_prefix()
4578 mmap_read_lock(vcpu->arch.gmap->mm); in kvm_s390_mprotect_notify_prefix()
4580 rc = __kvm_s390_mprotect_many(vcpu->arch.gmap, gaddr, 2, PROT_WRITE, GMAP_NOTIFY_MPROT); in kvm_s390_mprotect_notify_prefix()
4582 mmap_read_unlock(vcpu->arch.gmap->mm); in kvm_s390_mprotect_notify_prefix()
4583 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_s390_mprotect_notify_prefix()
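/*
 * The prefix area of an s390 guest CPU spans two consecutive 4K pages
 * (the absolute lowcore), which is why the helper above is called with
 * npages = 2 for the current prefix.  Mapping those pages with the MPROT
 * notification bit set makes kvm_gmap_notifier() fire on a later
 * host-side invalidation, so the prefix mapping is revalidated before
 * the VCPU reenters SIE.
 */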
4595 * If the guest prefix changed, re-arm the ipte notifier for the in kvm_s390_handle_requests()
4613 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
4619 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
4627 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
4634 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
4644 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
4650 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
4653 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4654 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4655 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
4675 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4676 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4678 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4679 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4680 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4685 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4686 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
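/*
 * The epoch is the signed difference between the requested guest TOD and
 * the host TOD (kvm->arch.epoch = gtod->tod - clk.tod).  With the
 * multiple-epoch facility (139) the epoch index (epdx) extends that
 * difference beyond 64 bits, so when the subtraction above wraps
 * (epoch > gtod->tod) one is borrowed from the epoch index.  Every
 * sie_block then receives the same epoch/epdx pair so that all VCPUs
 * observe one consistent guest TOD.
 */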
4695 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4698 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
4715 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4722 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4723 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4731 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
4732 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
4755 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
4757 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
4758 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
4764 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
4766 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4769 hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr); in kvm_arch_setup_async_pf()
4770 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4773 return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch); in kvm_arch_setup_async_pf()
4787 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4788 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4793 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4808 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4810 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4811 current->thread.gmap_int_code = 0; in vcpu_pre_run()
4812 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4838 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_addressing_exception()
4843 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_addressing_exception()
4847 pgm_info = vcpu->arch.pgm; in vcpu_post_run_addressing_exception()
4857 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm, in kvm_s390_assert_primary_as()
4859 current->thread.gmap_int_code, current->thread.gmap_teid.val); in kvm_s390_assert_primary_as()
4863 * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
4883 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) in __kvm_s390_handle_dat_fault()
4887 if (vcpu->arch.gmap->pfault_enabled) in __kvm_s390_handle_dat_fault()
4899 return -EAGAIN; in __kvm_s390_handle_dat_fault()
4906 vcpu->stat.pfault_sync++; in __kvm_s390_handle_dat_fault()
4913 return -EFAULT; in __kvm_s390_handle_dat_fault()
4916 mmap_read_lock(vcpu->arch.gmap->mm); in __kvm_s390_handle_dat_fault()
4918 rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked); in __kvm_s390_handle_dat_fault()
4920 rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr); in __kvm_s390_handle_dat_fault()
4921 scoped_guard(spinlock, &vcpu->kvm->mmu_lock) { in __kvm_s390_handle_dat_fault()
4922 kvm_release_faultin_page(vcpu->kvm, page, false, writable); in __kvm_s390_handle_dat_fault()
4924 mmap_read_unlock(vcpu->arch.gmap->mm); in __kvm_s390_handle_dat_fault()
4934 if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_dat_fault_handler()
4936 * This translates the per-vCPU guest address into a in vcpu_dat_fault_handler()
4942 mmap_read_lock(vcpu->arch.gmap->mm); in vcpu_dat_fault_handler()
4943 gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr); in vcpu_dat_fault_handler()
4944 mmap_read_unlock(vcpu->arch.gmap->mm); in vcpu_dat_fault_handler()
4945 if (gaddr_tmp == -EFAULT) { in vcpu_dat_fault_handler()
4946 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_dat_fault_handler()
4947 vcpu->run->s390_ucontrol.trans_exc_code = gaddr; in vcpu_dat_fault_handler()
4948 vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION; in vcpu_dat_fault_handler()
4949 return -EREMOTE; in vcpu_dat_fault_handler()
4962 gaddr = current->thread.gmap_teid.addr * PAGE_SIZE; in vcpu_post_run_handle_fault()
4966 switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) { in vcpu_post_run_handle_fault()
4968 vcpu->stat.exit_null++; in vcpu_post_run_handle_fault()
4979 if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) { in vcpu_post_run_handle_fault()
4989 current->thread.gmap_int_code, current->comm, in vcpu_post_run_handle_fault()
4990 current->pid); in vcpu_post_run_handle_fault()
5001 rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr); in vcpu_post_run_handle_fault()
5002 if (rc == -EINVAL) in vcpu_post_run_handle_fault()
5004 if (rc != -ENXIO) in vcpu_post_run_handle_fault()
5018 KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx", in vcpu_post_run_handle_fault()
5019 current->thread.gmap_int_code, current->thread.gmap_teid.val); in vcpu_post_run_handle_fault()
5033 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
5034 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
5039 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
5040 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
5042 if (exit_reason == -EINTR) { in vcpu_post_run()
5044 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
5046 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
5051 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
5054 if (rc != -EOPNOTSUPP) in vcpu_post_run()
5056 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
5057 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
5058 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
5059 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
5060 return -EREMOTE; in vcpu_post_run()
5079 * use the low-level arch_local_irq_*() helpers to enable/disable IRQs. in kvm_s390_enter_exit_sie()
5094 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
5097 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
5114 memcpy(sie_page->pv_grregs, in __vcpu_run()
5115 vcpu->run->s.regs.gprs, in __vcpu_run()
5116 sizeof(sie_page->pv_grregs)); in __vcpu_run()
5123 exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block, in __vcpu_run()
5124 vcpu->run->s.regs.gprs, in __vcpu_run()
5125 vcpu->arch.gmap->asce); in __vcpu_run()
5132 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
5133 sie_page->pv_grregs, in __vcpu_run()
5134 sizeof(sie_page->pv_grregs)); in __vcpu_run()
5137 * that leave the guest state in an "in-between" state in __vcpu_run()
5141 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
5142 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
5143 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
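/*
 * For protected (PV) guests the ultravisor hides the real register file
 * from the host: general purpose registers are shuttled through the
 * pv_grregs scratch area of the sie_page around every SIE entry and
 * exit.  After an interception that leaves the guest in an "in-between"
 * state (ICPT_PV_INSTR / ICPT_PV_PREF) the PSW interruption mask is
 * cleared so that no interrupt can be injected until the interrupted
 * operation has been completed.
 */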
5157 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
5161 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
5162 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
5163 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
5164 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
5165 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
5166 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
5167 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
5168 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
5170 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
5171 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
5172 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
5173 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
5174 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
5177 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
5178 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
5179 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
5180 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); in sync_regs_fmt2()
5186 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
5187 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
5188 riccb->v && in sync_regs_fmt2()
5189 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
5191 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
5194 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
5197 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
5198 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
5199 gscb->gssm && in sync_regs_fmt2()
5200 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
5202 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
5203 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
5204 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
5206 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
5207 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
5208 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
5209 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
5214 if (current->thread.gs_cb) { in sync_regs_fmt2()
5215 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
5216 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
5218 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
5219 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
5220 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
5221 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
5230 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
5232 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
5233 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
5234 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
5235 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
5239 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
5240 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
5241 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
5243 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
5244 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
5245 vcpu->arch.acrs_loaded = true; in sync_regs()
5246 kvm_s390_fpu_load(vcpu->run); in sync_regs()
5254 * example we must not inject interrupts after specific exits in sync_regs()
5260 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
5261 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
5265 kvm_run->kvm_dirty_regs = 0; in sync_regs()
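/*
 * sync_regs()/store_regs() implement the kvm_run synced-register
 * protocol: before KVM_RUN userspace marks the pieces of kvm_run->s.regs
 * it changed in kvm_run->kvm_dirty_regs, and after KVM_RUN every field
 * advertised in kvm_run->kvm_valid_regs is current, which avoids extra
 * KVM_GET/SET_*REGS ioctls on the hot path.  A minimal userspace sketch,
 * assuming "run" is the mmap'ed run structure of a vcpu fd (error
 * handling omitted):
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_prefix_and_run(int vcpu_fd, struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;

	return ioctl(vcpu_fd, KVM_RUN, 0);
}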
5270 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
5272 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
5273 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
5274 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
5275 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
5276 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
5280 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
5281 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
5282 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
5283 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
5284 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
5286 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
5294 struct kvm_run *kvm_run = vcpu->run; in store_regs()
5296 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
5297 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
5298 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
5299 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
5300 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
5301 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
5302 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
5303 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
5304 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
5305 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
5306 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
5307 vcpu->arch.acrs_loaded = false; in store_regs()
5308 kvm_s390_fpu_store(vcpu->run); in store_regs()
5315 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
5325 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5326 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5328 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
5329 return -EINTR; in kvm_arch_vcpu_ioctl_run()
5331 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
5332 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
5333 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5349 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5353 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
5354 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
5366 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
5367 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
5375 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
5386 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
5395 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5396 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5409 return -EFAULT; in kvm_s390_store_status_unloaded()
5413 return -EFAULT; in kvm_s390_store_status_unloaded()
5416 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
5420 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
5425 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
5428 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
5430 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
5434 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
5436 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
5440 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
5444 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
5446 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
5447 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
5457 kvm_s390_fpu_store(vcpu->run); in kvm_s390_vcpu_store_status()
5458 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
5494 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
5496 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5497 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5503 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5509 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5514 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
5522 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5532 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
5538 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5550 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
5552 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5553 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5559 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5576 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5592 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5601 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
5602 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5604 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
5606 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5607 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5608 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5609 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5614 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5623 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_sida_op()
5627 if (mop->flags || !mop->size) in kvm_s390_vcpu_sida_op()
5628 return -EINVAL; in kvm_s390_vcpu_sida_op()
5629 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_vcpu_sida_op()
5630 return -EINVAL; in kvm_s390_vcpu_sida_op()
5631 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_vcpu_sida_op()
5632 return -E2BIG; in kvm_s390_vcpu_sida_op()
5634 return -EINVAL; in kvm_s390_vcpu_sida_op()
5636 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; in kvm_s390_vcpu_sida_op()
5638 switch (mop->op) { in kvm_s390_vcpu_sida_op()
5640 if (copy_to_user(uaddr, sida_addr, mop->size)) in kvm_s390_vcpu_sida_op()
5641 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5645 if (copy_from_user(sida_addr, uaddr, mop->size)) in kvm_s390_vcpu_sida_op()
5646 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5655 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_mem_op()
5665 if (mop->ar >= NUM_ACRS) in kvm_s390_vcpu_mem_op()
5666 return -EINVAL; in kvm_s390_vcpu_mem_op()
5668 return -EINVAL; in kvm_s390_vcpu_mem_op()
5669 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vcpu_mem_op()
5670 tmpbuf = vmalloc(mop->size); in kvm_s390_vcpu_mem_op()
5672 return -ENOMEM; in kvm_s390_vcpu_mem_op()
5675 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vcpu_mem_op()
5676 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vcpu_mem_op()
5677 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, in kvm_s390_vcpu_mem_op()
5678 acc_mode, mop->key); in kvm_s390_vcpu_mem_op()
5682 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5683 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5686 if (copy_to_user(uaddr, tmpbuf, mop->size)) { in kvm_s390_vcpu_mem_op()
5687 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5691 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vcpu_mem_op()
5692 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5695 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5696 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5700 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_vcpu_mem_op()
5701 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_vcpu_mem_op()
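/*
 * kvm_s390_vcpu_mem_op() implements the per-vcpu KVM_S390_MEM_OP ioctl
 * for logical guest addresses: the access is translated the way the
 * guest would see it (including the chosen access register and storage
 * key) and is either check-only or bounced through a kernel buffer.
 * A minimal userspace sketch, assuming a vcpu fd (error handling
 * omitted):
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op mop;

	memset(&mop, 0, sizeof(mop));		/* flags = 0, ar = 0, key = 0 */
	mop.gaddr = gaddr;
	mop.buf   = (uint64_t)(uintptr_t)buf;
	mop.size  = len;
	mop.op    = KVM_S390_MEMOP_LOGICAL_READ;

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}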
5713 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5715 switch (mop->op) { in kvm_s390_vcpu_memsida_op()
5722 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_vcpu_memsida_op()
5726 r = -EINVAL; in kvm_s390_vcpu_memsida_op()
5729 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5736 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
5745 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5754 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5756 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
5761 rc = -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
5766 * To simplify single stepping of userspace-emulated instructions, in kvm_arch_vcpu_async_ioctl()
5773 vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; in kvm_arch_vcpu_async_ioctl()
5786 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5787 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5789 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp))) in kvm_s390_handle_pv_vcpu_dump()
5790 return -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5794 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5798 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5802 return -ENOMEM; in kvm_s390_handle_pv_vcpu_dump()
5804 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5807 vcpu->vcpu_id, cmd->rc, cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5810 ret = -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5814 ret = -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5823 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
5833 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5835 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5840 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5880 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5883 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5897 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5901 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5902 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5906 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
5914 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5918 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5919 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5923 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
5929 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5931 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5937 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5949 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5955 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5961 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5973 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5977 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5989 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5993 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5997 r = -EINVAL; in kvm_arch_vcpu_ioctl()
6010 r = -EFAULT; in kvm_arch_vcpu_ioctl()
6014 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
6024 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
6025 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
6026 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
6027 get_page(vmf->page); in kvm_arch_vcpu_fault()
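/*
 * The fault handler above only resolves vcpu-fd mmap faults for
 * user-controlled (ucontrol) VMs: page KVM_S390_SIE_PAGE_OFFSET of the
 * vcpu fd maps the hardware SIE control block so that such a VMM can
 * inspect and drive the SIE state directly.  A minimal userspace sketch,
 * assuming a vcpu fd of a ucontrol VM (error handling omitted):
 */
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

static void *map_sie_block(int vcpu_fd)
{
	long page_size = sysconf(_SC_PAGESIZE);

	return mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * page_size);
}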
6047 if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS) in kvm_arch_prepare_memory_region()
6048 return -EINVAL; in kvm_arch_prepare_memory_region()
6052 return -EINVAL; in kvm_arch_prepare_memory_region()
6062 if (new->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
6063 return -EINVAL; in kvm_arch_prepare_memory_region()
6065 size = new->npages * PAGE_SIZE; in kvm_arch_prepare_memory_region()
6067 return -EINVAL; in kvm_arch_prepare_memory_region()
6069 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
6070 return -EINVAL; in kvm_arch_prepare_memory_region()
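/*
 * The checks above encode the s390 memslot constraints: the userspace
 * address and the slot size have to be aligned to 1M segments (the gmap
 * maps guest storage in segment-sized chunks), and the slot must fit
 * below kvm->arch.mem_limit (unlimited for ucontrol VMs, otherwise
 * bounded by the host address space and an optional limit set through
 * the KVM_S390_VM_MEM_LIMIT_SIZE attribute).
 */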
6073 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
6078 * - userspace creates a new memslot with dirty logging off, in kvm_arch_prepare_memory_region()
6079 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and in kvm_arch_prepare_memory_region()
6085 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_arch_prepare_memory_region()
6104 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6105 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6108 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6109 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6114 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
6115 new->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6116 new->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
6141 return -ENODEV; in kvm_s390_init()
6146 return -EINVAL; in kvm_s390_init()