Lines matching refs: kvm
272 static int sca_switch_to_extended(struct kvm *kvm);
305 struct kvm *kvm; in kvm_clock_sync() local
310 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
311 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
314 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
315 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
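
The kvm_clock_sync() matches above walk the global vm_list and push the same TOD-epoch delta into every vCPU, mirroring each updated value into the per-VM copy. A minimal user-space sketch of that shape, assuming invented struct layouts that stand in for the kernel's types:

/*
 * Walk a global list of VMs and apply one epoch delta to every
 * vCPU; the per-VM epoch/epdx fields track the last vCPU written,
 * as in the kvm_clock_sync() matches. All names are illustrative.
 */
#include <stdio.h>

struct vcpu { long epoch; long epdx; };

struct vm {
	struct vm *next;                /* models vm_list linkage */
	struct vcpu vcpus[2];
	int nr_vcpus;
	long epoch, epdx;               /* models kvm->arch.epoch/epdx */
};

static void clock_sync(struct vm *vm_list, long delta)
{
	for (struct vm *vm = vm_list; vm; vm = vm->next) {
		for (int i = 0; i < vm->nr_vcpus; i++) {
			vm->vcpus[i].epoch += delta;
			vm->epoch = vm->vcpus[i].epoch;
			vm->epdx = vm->vcpus[i].epdx;
		}
	}
}

int main(void)
{
	struct vm vm = { .nr_vcpus = 2 };

	clock_sync(&vm, 42);
	printf("epoch=%ld epdx=%ld\n", vm.epoch, vm.epdx);
	return 0;
}
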
573 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
614 if (hpage && !(kvm && kvm_is_ucontrol(kvm))) in kvm_vm_ioctl_check_extension()
693 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
698 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
714 mark_page_dirty(kvm, cur_gfn + i); in kvm_arch_sync_dirty_log()
729 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
737 if (kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_get_dirty_log()
740 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
746 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log()
757 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
761 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) in icpt_operexc_on_all_vcpus() argument
766 kvm_for_each_vcpu(i, vcpu, kvm) { in icpt_operexc_on_all_vcpus()
771 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
780 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
781 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
785 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
786 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
790 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
791 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
794 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
795 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
797 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
798 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
801 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
802 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
805 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
806 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
809 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
810 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
813 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
814 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
817 set_kvm_facility(kvm->arch.model.fac_mask, 198); in kvm_vm_ioctl_enable_cap()
818 set_kvm_facility(kvm->arch.model.fac_list, 198); in kvm_vm_ioctl_enable_cap()
821 set_kvm_facility(kvm->arch.model.fac_mask, 199); in kvm_vm_ioctl_enable_cap()
822 set_kvm_facility(kvm->arch.model.fac_list, 199); in kvm_vm_ioctl_enable_cap()
827 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
828 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
833 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
834 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
837 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
838 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
841 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
842 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", in kvm_vm_ioctl_enable_cap()
846 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
847 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
850 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
851 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
854 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
855 VM_EVENT(kvm, 3, "ENABLE: AIS %s", in kvm_vm_ioctl_enable_cap()
860 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
861 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
864 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
865 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
868 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
869 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", in kvm_vm_ioctl_enable_cap()
873 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
874 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
876 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
880 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
881 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
882 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
888 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
889 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
891 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
892 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", in kvm_vm_ioctl_enable_cap()
896 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
897 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
901 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); in kvm_vm_ioctl_enable_cap()
902 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
903 icpt_operexc_on_all_vcpus(kvm); in kvm_vm_ioctl_enable_cap()
908 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
909 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
912 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
913 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
916 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
917 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s", in kvm_vm_ioctl_enable_cap()
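
Nearly every case in the kvm_vm_ioctl_enable_cap() matches above repeats one idiom: take kvm->lock, refuse the change once created_vcpus is nonzero (the CPU model freezes when the first vCPU is created), otherwise set the facility bit in both fac_mask and fac_list, then unlock and trace. A compilable sketch of that idiom, with a pthread mutex standing in for kvm->lock and a simplified word-wise bitmap (the kernel stores facility bits byte-wise, MSB first):

#include <pthread.h>
#include <errno.h>

#define FAC_WORDS 4

struct model { unsigned long fac_mask[FAC_WORDS], fac_list[FAC_WORDS]; };
struct vm { pthread_mutex_t lock; int created_vcpus; struct model model; };

/* MSB-first bit numbering within 64-bit words; a simplification */
static void set_facility(unsigned long *fac, int nr)
{
	fac[nr / 64] |= 1UL << (63 - (nr % 64));
}

static int enable_facility_cap(struct vm *vm, int nr)
{
	int r = 0;

	pthread_mutex_lock(&vm->lock);
	if (vm->created_vcpus) {
		r = -EBUSY;             /* too late: vCPUs already exist */
	} else {
		set_facility(vm->model.fac_mask, nr);
		set_facility(vm->model.fac_list, nr);
	}
	pthread_mutex_unlock(&vm->lock);
	return r;
}

int main(void)
{
	struct vm vm = { .lock = PTHREAD_MUTEX_INITIALIZER };

	return enable_facility_cap(&vm, 129) ? 1 : 0;
}
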
927 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
934 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
935 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
936 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
946 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
956 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
957 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
958 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
960 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
963 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
965 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
968 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
975 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
978 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
979 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
980 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
981 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
982 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
983 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
989 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
995 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
996 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
1007 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
1008 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
1015 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
1016 new->private = kvm; in kvm_s390_set_mem_control()
1017 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
1021 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
1022 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); in kvm_s390_set_mem_control()
1023 VM_EVENT(kvm, 3, "New guest asce: 0x%pK", in kvm_s390_set_mem_control()
1024 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
1036 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) in kvm_s390_vcpu_crypto_reset_all() argument
1041 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1043 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_crypto_reset_all()
1049 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
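
kvm_s390_vcpu_crypto_reset_all() shows the quiesce pattern that also appears below in kvm_s390_vcpu_pci_enable_interp() and kvm_arch_crypto_set_masks(): block every vCPU, rewrite the shared state, then unblock so each vCPU picks the change up on its next entry. A condensed sketch; block_all()/unblock_all() model kvm_s390_vcpu_block_all()/unblock_all(), and the generation counter is an invented stand-in for the per-vCPU crypto setup:

#include <stdbool.h>

struct vcpu { bool blocked; int crypto_gen; };
struct vm { struct vcpu vcpus[4]; int nr_vcpus; int crypto_gen; };

static void block_all(struct vm *vm)
{
	for (int i = 0; i < vm->nr_vcpus; i++)
		vm->vcpus[i].blocked = true;
}

static void unblock_all(struct vm *vm)
{
	for (int i = 0; i < vm->nr_vcpus; i++)
		vm->vcpus[i].blocked = false;
}

/* quiesce, update every vCPU, resume */
static void crypto_reset_all(struct vm *vm)
{
	block_all(vm);
	vm->crypto_gen++;
	for (int i = 0; i < vm->nr_vcpus; i++)
		vm->vcpus[i].crypto_gen = vm->crypto_gen;
	unblock_all(vm);
}

int main(void)
{
	struct vm vm = { .nr_vcpus = 4 };

	crypto_reset_all(&vm);
	return vm.crypto_gen == 1 ? 0 : 1;
}
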
1052 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
1054 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1057 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1058 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1062 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1063 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1064 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1065 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1068 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1069 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1073 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1074 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1075 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1076 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1079 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1080 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1083 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1084 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1085 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1086 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1089 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1090 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1093 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1094 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1095 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1096 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1100 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1103 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1107 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1110 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1113 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1117 kvm_s390_vcpu_crypto_reset_all(kvm); in kvm_s390_vm_set_crypto()
1118 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1125 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1132 void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm) in kvm_s390_vcpu_pci_enable_interp() argument
1137 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1146 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1148 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1150 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_pci_enable_interp()
1155 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1158 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) in kvm_s390_sync_request_broadcast() argument
1163 kvm_for_each_vcpu(cx, vcpu, kvm) in kvm_s390_sync_request_broadcast()
1171 static int kvm_s390_vm_start_migration(struct kvm *kvm) in kvm_s390_vm_start_migration() argument
1179 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1181 slots = kvm_memslots(kvm); in kvm_s390_vm_start_migration()
1185 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1186 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1202 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1203 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1204 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION); in kvm_s390_vm_start_migration()
1212 static int kvm_s390_vm_stop_migration(struct kvm *kvm) in kvm_s390_vm_stop_migration() argument
1215 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1217 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1218 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1219 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); in kvm_s390_vm_stop_migration()
1223 static int kvm_s390_vm_set_migration(struct kvm *kvm, in kvm_s390_vm_set_migration() argument
1228 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1231 res = kvm_s390_vm_start_migration(kvm); in kvm_s390_vm_set_migration()
1234 res = kvm_s390_vm_stop_migration(kvm); in kvm_s390_vm_set_migration()
1239 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1244 static int kvm_s390_vm_get_migration(struct kvm *kvm, in kvm_s390_vm_get_migration() argument
1247 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
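
The migration matches pair a start path, which seeds an atomic dirty-page counter and broadcasts KVM_REQ_START_MIGRATION to all vCPUs, with a stop path that clears the mode and broadcasts the counterpart; both are dispatched under slots_lock. A sketch of that toggle, with C11 stdatomic standing in for atomic64_t and an empty broadcast() in place of the vCPU kick:

#include <stdatomic.h>
#include <stdbool.h>

enum { REQ_START_MIGRATION, REQ_STOP_MIGRATION };

struct vm {
	bool migration_mode;
	bool use_cmma;
	atomic_ulong cmma_dirty_pages;
};

static void broadcast(struct vm *vm, int req)
{
	(void)vm; (void)req;            /* would kick every vCPU */
}

static int start_migration(struct vm *vm, unsigned long ram_pages)
{
	if (vm->migration_mode)
		return 0;               /* already migrating */
	if (!vm->use_cmma) {
		vm->migration_mode = true;
		return 0;               /* no CMMA: nothing to count */
	}
	atomic_store(&vm->cmma_dirty_pages, ram_pages);
	vm->migration_mode = true;
	broadcast(vm, REQ_START_MIGRATION);
	return 0;
}

static int stop_migration(struct vm *vm)
{
	if (!vm->migration_mode)
		return 0;
	vm->migration_mode = false;
	if (vm->use_cmma)
		broadcast(vm, REQ_STOP_MIGRATION);
	return 0;
}

int main(void)
{
	struct vm vm = { .use_cmma = true };

	start_migration(&vm, 1024);
	return stop_migration(&vm);
}
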
1257 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1259 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_ext() argument
1266 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) in kvm_s390_set_tod_ext()
1268 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_ext()
1270 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_set_tod_ext()
1276 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
1286 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
1291 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
1299 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_low()
1300 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); in kvm_s390_set_tod_low()
1304 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
1311 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1316 if (kvm_s390_pv_is_protected(kvm)) { in kvm_s390_set_tod()
1323 ret = kvm_s390_set_tod_ext(kvm, attr); in kvm_s390_set_tod()
1326 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
1329 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
1337 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1341 static void kvm_s390_get_tod_clock(struct kvm *kvm, in kvm_s390_get_tod_clock() argument
1350 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1352 if (test_kvm_facility(kvm, 139)) { in kvm_s390_get_tod_clock()
1353 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1361 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_ext() argument
1366 kvm_s390_get_tod_clock(kvm, &gtod); in kvm_s390_get_tod_ext()
1370 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1375 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
1382 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1387 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
1391 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
1394 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1399 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
1408 ret = kvm_s390_get_tod_ext(kvm, attr); in kvm_s390_get_tod()
1411 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
1414 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
1423 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
1429 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1430 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1441 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1446 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1448 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1450 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1452 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1454 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_set_processor()
1455 kvm->arch.model.ibc, in kvm_s390_set_processor()
1456 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1457 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_set_processor()
1458 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1459 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1460 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1465 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1469 static int kvm_s390_set_processor_feat(struct kvm *kvm, in kvm_s390_set_processor_feat() argument
1481 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1482 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1483 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1486 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1487 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1488 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_set_processor_feat()
1495 static int kvm_s390_set_processor_subfunc(struct kvm *kvm, in kvm_s390_set_processor_subfunc() argument
1498 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1499 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1500 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1504 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1506 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1509 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1511 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1512 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1513 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1514 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1516 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1519 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1521 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1522 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1525 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1527 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1528 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1530 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1531 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1534 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1536 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1537 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1539 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1540 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1542 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1543 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1545 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1546 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1548 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1549 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1551 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1552 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1554 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1555 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1557 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1558 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1560 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1563 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1566 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1568 VM_EVENT(kvm, 3, "SET: guest PFCR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1584 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_uv_feat() argument
1595 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1596 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1597 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1600 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1601 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1603 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1608 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
1614 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
1617 ret = kvm_s390_set_processor_feat(kvm, attr); in kvm_s390_set_cpu_model()
1620 ret = kvm_s390_set_processor_subfunc(kvm, attr); in kvm_s390_set_cpu_model()
1623 ret = kvm_s390_set_uv_feat(kvm, attr); in kvm_s390_set_cpu_model()
1629 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
1639 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1640 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1641 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1643 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_get_processor()
1644 kvm->arch.model.ibc, in kvm_s390_get_processor()
1645 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1646 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_processor()
1647 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1648 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1649 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1657 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
1669 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1673 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx", in kvm_s390_get_machine()
1674 kvm->arch.model.ibc, in kvm_s390_get_machine()
1675 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1676 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1680 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1691 static int kvm_s390_get_processor_feat(struct kvm *kvm, in kvm_s390_get_processor_feat() argument
1696 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1699 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_processor_feat()
1706 static int kvm_s390_get_machine_feat(struct kvm *kvm, in kvm_s390_get_machine_feat() argument
1714 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_machine_feat()
1721 static int kvm_s390_get_processor_subfunc(struct kvm *kvm, in kvm_s390_get_processor_subfunc() argument
1724 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1728 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1729 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1730 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1731 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1733 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1735 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1736 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1738 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1739 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1741 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1742 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1744 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1745 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1746 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1747 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1748 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1750 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1751 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1752 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1753 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1754 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1755 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1756 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1757 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1758 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1759 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1760 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1761 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1762 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1763 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1764 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1765 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1766 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1767 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1768 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1769 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1770 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1771 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1772 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1773 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1774 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1775 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1776 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1777 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1778 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1779 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1780 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1781 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1782 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1783 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1784 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1785 VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1792 static int kvm_s390_get_machine_subfunc(struct kvm *kvm, in kvm_s390_get_machine_subfunc() argument
1799 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1804 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1807 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1810 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1813 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1816 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1819 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1822 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1825 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1828 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1831 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1834 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1837 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1840 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1843 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1846 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1851 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1856 VM_EVENT(kvm, 3, "GET: host PFCR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1863 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor_uv_feat() argument
1866 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1870 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1875 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine_uv_feat() argument
1885 VM_EVENT(kvm, 3, "GET: host UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1890 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
1896 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
1899 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
1902 ret = kvm_s390_get_processor_feat(kvm, attr); in kvm_s390_get_cpu_model()
1905 ret = kvm_s390_get_machine_feat(kvm, attr); in kvm_s390_get_cpu_model()
1908 ret = kvm_s390_get_processor_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1911 ret = kvm_s390_get_machine_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1914 ret = kvm_s390_get_processor_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1917 ret = kvm_s390_get_machine_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1934 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) in kvm_s390_update_topology_change_report() argument
1939 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1940 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1946 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1949 static int kvm_s390_set_topo_change_indication(struct kvm *kvm, in kvm_s390_set_topo_change_indication() argument
1952 if (!test_kvm_facility(kvm, 11)) in kvm_s390_set_topo_change_indication()
1955 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1959 static int kvm_s390_get_topo_change_indication(struct kvm *kvm, in kvm_s390_get_topo_change_indication() argument
1964 if (!test_kvm_facility(kvm, 11)) in kvm_s390_get_topo_change_indication()
1967 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1968 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1969 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1974 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
1980 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
1983 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
1986 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
1989 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
1992 ret = kvm_s390_vm_set_migration(kvm, attr); in kvm_s390_vm_set_attr()
1995 ret = kvm_s390_set_topo_change_indication(kvm, attr); in kvm_s390_vm_set_attr()
2005 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
2011 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
2014 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
2017 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
2020 ret = kvm_s390_vm_get_migration(kvm, attr); in kvm_s390_vm_get_attr()
2023 ret = kvm_s390_get_topo_change_indication(kvm, attr); in kvm_s390_vm_get_attr()
2033 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
2101 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2111 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
2133 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2135 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2145 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2159 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
2191 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2194 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2216 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2232 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_peek_cmma() argument
2239 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_peek_cmma()
2246 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2289 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_get_cmma() argument
2293 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_s390_get_cmma()
2300 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2309 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_get_cmma()
2314 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2315 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2332 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2348 static int kvm_s390_get_cmma_bits(struct kvm *kvm, in kvm_s390_get_cmma_bits() argument
2355 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2362 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2366 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2371 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2380 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2381 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2383 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2385 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2386 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2387 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2389 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2390 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2406 static int kvm_s390_set_cmma_bits(struct kvm *kvm, in kvm_s390_set_cmma_bits() argument
2415 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2437 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2438 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2440 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2449 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2451 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2452 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2454 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2455 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2456 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2457 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
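
The storage-key and CMMA matches share one bracketing idiom: take mmap_read_lock() plus srcu_read_lock(), translate each guest frame number to a host virtual address inside the critical section, then drop the locks in reverse order. In this sketch a single pthread rwlock stands in for both kernel primitives, and gfn_to_hva() is modeled as plain arithmetic over an assumed flat memslot:

#include <pthread.h>
#include <stdint.h>

static pthread_rwlock_t addr_space_lock = PTHREAD_RWLOCK_INITIALIZER;

/* models gfn_to_hva() for one flat 4K-frame memslot */
static uintptr_t gfn_to_hva(uintptr_t hva_base, uint64_t gfn)
{
	return hva_base + (gfn << 12);
}

static int walk_frames(uintptr_t hva_base, uint64_t start_gfn,
		       unsigned long count, void (*visit)(uintptr_t hva))
{
	pthread_rwlock_rdlock(&addr_space_lock);        /* mmap + srcu */
	for (unsigned long i = 0; i < count; i++)
		visit(gfn_to_hva(hva_base, start_gfn + i));
	pthread_rwlock_unlock(&addr_space_lock);        /* reverse order */
	return 0;
}

static void touch(uintptr_t hva) { (void)hva; }

int main(void)
{
	return walk_frames(0x100000, 0, 16, touch);
}
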
2477 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_from_pv() argument
2492 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_from_pv()
2503 kvm_s390_gisa_enable(kvm); in kvm_s390_cpus_from_pv()
2518 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_to_pv() argument
2528 kvm_s390_gisa_disable(kvm); in kvm_s390_cpus_to_pv()
2530 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_to_pv()
2538 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); in kvm_s390_cpus_to_pv()
2589 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, in kvm_s390_pv_dmp() argument
2597 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2604 kvm_s390_vcpu_block_all(kvm); in kvm_s390_pv_dmp()
2606 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_pv_dmp()
2608 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x", in kvm_s390_pv_dmp()
2611 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2613 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_pv_dmp()
2619 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2627 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len, in kvm_s390_pv_dmp()
2632 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2639 r = kvm_s390_pv_dump_complete(kvm, result_buff, in kvm_s390_pv_dmp()
2651 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) in kvm_s390_handle_pv() argument
2659 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2664 if (kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2671 r = sca_switch_to_extended(kvm); in kvm_s390_handle_pv()
2679 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2683 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2685 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); in kvm_s390_handle_pv()
2688 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2693 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) in kvm_s390_handle_pv()
2696 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2704 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2707 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2714 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2718 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2721 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2729 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2732 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2740 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2760 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, in kvm_s390_handle_pv()
2770 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2777 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, in kvm_s390_handle_pv()
2783 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2786 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2788 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2794 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2797 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2799 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", in kvm_s390_handle_pv()
2805 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2808 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2810 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", in kvm_s390_handle_pv()
2858 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2865 r = kvm_s390_pv_dmp(kvm, cmd, dmp); in kvm_s390_handle_pv()
2880 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
2900 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_abs() argument
2918 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2920 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2927 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2931 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2942 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2947 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2953 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_cmpxchg() argument
2980 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2982 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2987 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2993 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
2997 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op() argument
3008 if (kvm_s390_pv_get_handle(kvm)) in kvm_s390_vm_mem_op()
3014 return kvm_s390_vm_mem_op_abs(kvm, mop); in kvm_s390_vm_mem_op()
3016 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop); in kvm_s390_vm_mem_op()
3024 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
3036 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
3041 if (kvm->arch.use_irqchip) in kvm_arch_vm_ioctl()
3049 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3056 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3063 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3073 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3083 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3092 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3093 r = kvm_s390_get_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3094 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3108 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3109 r = kvm_s390_set_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3110 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3117 kvm_s390_set_user_cpu_state_ctrl(kvm); in kvm_arch_vm_ioctl()
3132 r = kvm_s390_handle_pv(kvm, &args); in kvm_arch_vm_ioctl()
3143 r = kvm_s390_vm_mem_op(kvm, &mem_op); in kvm_arch_vm_ioctl()
3158 r = kvm_s390_pci_zpci_op(kvm, &args); in kvm_arch_vm_ioctl()
3188 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
3190 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb); in kvm_s390_set_crycb_format()
3193 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3196 if (!test_kvm_facility(kvm, 76)) in kvm_s390_set_crycb_format()
3200 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3202 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3220 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, in kvm_arch_crypto_set_masks() argument
3223 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3225 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_set_masks()
3227 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3230 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3233 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3236 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3244 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", in kvm_arch_crypto_set_masks()
3253 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_set_masks()
3254 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_set_masks()
3270 void kvm_arch_crypto_clear_masks(struct kvm *kvm) in kvm_arch_crypto_clear_masks() argument
3272 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_clear_masks()
3274 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3275 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3276 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3277 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3279 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); in kvm_arch_crypto_clear_masks()
3281 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_clear_masks()
3282 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_clear_masks()
3295 static void kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
3297 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3298 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
3299 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3301 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
3305 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3306 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3307 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3308 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3309 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3310 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3313 static void sca_dispose(struct kvm *kvm) in sca_dispose() argument
3315 if (kvm->arch.use_esca) in sca_dispose()
3316 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3318 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3319 kvm->arch.sca = NULL; in sca_dispose()
3322 void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
3325 kvm_s390_pci_clear_list(kvm); in kvm_arch_free_vm()
3327 __kvm_arch_free_vm(kvm); in kvm_arch_free_vm()
3330 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
3356 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3358 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3359 if (!kvm->arch.sca) in kvm_arch_init_vm()
3365 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3366 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3371 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3372 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3376 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3378 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3381 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3382 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3385 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3388 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3391 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3394 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3395 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3397 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3398 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3400 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3401 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3405 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3407 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3408 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3410 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3412 kvm_s390_crypto_init(kvm); in kvm_arch_init_vm()
3415 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3416 kvm_s390_pci_init_list(kvm); in kvm_arch_init_vm()
3417 kvm_s390_vcpu_pci_enable_interp(kvm); in kvm_arch_init_vm()
3418 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3421 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3422 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3424 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3425 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3426 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3428 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3429 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
3440 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3441 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3443 mutex_lock(&kvm->slots_lock); in kvm_arch_init_vm()
3444 KVM_BUG_ON(kvm_set_internal_memslot(kvm, &fake_memslot), kvm); in kvm_arch_init_vm()
3445 mutex_unlock(&kvm->slots_lock); in kvm_arch_init_vm()
3448 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3450 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3452 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3453 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3455 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3456 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3459 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3460 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3461 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3462 kvm_s390_vsie_init(kvm); in kvm_arch_init_vm()
3464 kvm_s390_gisa_init(kvm); in kvm_arch_init_vm()
3465 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3466 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3467 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3471 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3472 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3473 sca_dispose(kvm); in kvm_arch_init_vm()
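
The tail of the kvm_arch_init_vm() matches (free_page, debug_unregister, sca_dispose) is the classic goto-based unwind: each allocation that fails jumps to a label that releases exactly the resources acquired before it, in reverse order. A self-contained sketch with calloc/free standing in for the page and debug-facility allocators; the field names are invented:

#include <stdlib.h>

struct vm_arch { void *sca, *dbf, *sie_page2; };

static int vm_arch_init(struct vm_arch *a)
{
	a->sca = calloc(1, 4096);
	if (!a->sca)
		goto out_err;
	a->dbf = calloc(1, 256);
	if (!a->dbf)
		goto out_free_sca;
	a->sie_page2 = calloc(1, 4096);
	if (!a->sie_page2)
		goto out_free_dbf;
	return 0;                       /* success: caller owns all three */

out_free_dbf:
	free(a->dbf);
out_free_sca:
	free(a->sca);
out_err:
	return -1;
}

int main(void)
{
	struct vm_arch a;

	if (vm_arch_init(&a))
		return 1;
	free(a.sie_page2);
	free(a.dbf);
	free(a.sca);
	return 0;
}
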
3486 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3488 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3490 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3493 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3501 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
3505 kvm_destroy_vcpus(kvm); in kvm_arch_destroy_vm()
3506 sca_dispose(kvm); in kvm_arch_destroy_vm()
3507 kvm_s390_gisa_destroy(kvm); in kvm_arch_destroy_vm()
3513 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc); in kvm_arch_destroy_vm()
3520 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3521 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3523 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3524 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3525 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
3526 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3527 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
3528 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
3529 kvm_s390_vsie_destroy(kvm); in kvm_arch_destroy_vm()
3530 KVM_EVENT(3, "vm 0x%pK destroyed", kvm); in kvm_arch_destroy_vm()
3539 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3548 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3549 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3550 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3555 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3560 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3566 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3573 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3574 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3575 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3584 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3592 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
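
sca_del_vcpu() and sca_add_vcpu() both take arch.sca_lock for reading and branch on use_esca, because the one arch.sca pointer may refer to either a bsca_block or an esca_block. A sketch of that reader discipline with assumed minimal types (the real blocks also carry MCN bitmaps and more); build with -lpthread:

	#include <pthread.h>
	#include <stdio.h>

	struct bsca_model { unsigned long sda[64]; };	/* basic: 64 vCPUs */
	struct esca_model { unsigned long sda[248]; };	/* extended: 248 vCPUs */

	struct vm_model {
		pthread_rwlock_t sca_lock;
		int use_esca;
		void *sca;	/* points at a bsca_model or an esca_model */
	};

	/* Readers dereference sca only under the read lock, which is what
	 * lets the writer in sca_switch_to_extended() swap the pointer. */
	static void set_entry(struct vm_model *vm, int id, unsigned long sda)
	{
		pthread_rwlock_rdlock(&vm->sca_lock);
		if (vm->use_esca)
			((struct esca_model *)vm->sca)->sda[id] = sda;
		else
			((struct bsca_model *)vm->sca)->sda[id] = sda;
		pthread_rwlock_unlock(&vm->sca_lock);
	}

	int main(void)
	{
		static struct bsca_model bsca;
		struct vm_model vm = { .use_esca = 0, .sca = &bsca };

		pthread_rwlock_init(&vm.sca_lock, NULL);
		set_entry(&vm, 0, 0x2000);
		printf("sda[0]=%#lx\n", bsca.sda[0]);
		return 0;
	}
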
3613 static int sca_switch_to_extended(struct kvm *kvm) in sca_switch_to_extended() argument
3615 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3622 if (kvm->arch.use_esca) in sca_switch_to_extended()
3633 kvm_s390_vcpu_block_all(kvm); in sca_switch_to_extended()
3634 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3638 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { in sca_switch_to_extended()
3643 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3644 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3646 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3647 kvm_s390_vcpu_unblock_all(kvm); in sca_switch_to_extended()
3651 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
3652 old_sca, kvm->arch.sca); in sca_switch_to_extended()
3656 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) in sca_can_add_vcpu() argument
3670 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
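
sca_can_add_vcpu() upgrades lazily: use_esca short-circuits to success, otherwise sca_switch_to_extended() allocates the extended block, copies every existing entry while all vCPUs are blocked and sca_lock is write-held, then flips the pointer and the flag. Copy-then-swap in miniature, with the 64/248 slot counts assumed from the basic/extended SCA formats:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define BSCA_SLOTS 64	/* assumed basic-SCA capacity */
	#define ESCA_SLOTS 248	/* assumed extended-SCA capacity */

	struct bsca_model { unsigned long sda[BSCA_SLOTS]; };
	struct esca_model { unsigned long sda[ESCA_SLOTS]; };

	struct vm_model { int use_esca; void *sca; };

	static int switch_to_extended_model(struct vm_model *vm)
	{
		struct bsca_model *old = vm->sca;
		struct esca_model *new_sca;

		if (vm->use_esca)
			return 0;	/* already extended */
		new_sca = calloc(1, sizeof(*new_sca));
		if (!new_sca)
			return -1;
		/* The kernel walks kvm_for_each_vcpu() with all vCPUs
		 * blocked and sca_lock write-held; a flat copy stands in. */
		memcpy(new_sca->sda, old->sda, sizeof(old->sda));
		vm->sca = new_sca;
		vm->use_esca = 1;
		free(old);
		return 0;
	}

	int main(void)
	{
		struct bsca_model *old = calloc(1, sizeof(*old));
		struct vm_model vm = { .use_esca = 0, .sca = old };

		if (!old)
			return 1;
		old->sda[3] = 0xbeef;
		switch_to_extended_model(&vm);
		printf("use_esca=%d sda[3]=%#lx\n", vm.use_esca,
		       ((struct esca_model *)vm.sca)->sda[3]);
		free(vm.sca);
		return 0;
	}
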
3782 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3784 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3785 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3787 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3788 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3789 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3792 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3796 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) in kvm_has_pckmo_subfunc() argument
3798 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3804 static bool kvm_has_pckmo_ecc(struct kvm *kvm) in kvm_has_pckmo_ecc() argument
3807 return kvm_has_pckmo_subfunc(kvm, 32) || in kvm_has_pckmo_ecc()
3808 kvm_has_pckmo_subfunc(kvm, 33) || in kvm_has_pckmo_ecc()
3809 kvm_has_pckmo_subfunc(kvm, 34) || in kvm_has_pckmo_ecc()
3810 kvm_has_pckmo_subfunc(kvm, 40) || in kvm_has_pckmo_ecc()
3811 kvm_has_pckmo_subfunc(kvm, 41); in kvm_has_pckmo_ecc()
3815 static bool kvm_has_pckmo_hmac(struct kvm *kvm) in kvm_has_pckmo_hmac() argument
3818 return kvm_has_pckmo_subfunc(kvm, 118) || in kvm_has_pckmo_hmac()
3819 kvm_has_pckmo_subfunc(kvm, 122); in kvm_has_pckmo_hmac()
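
kvm_has_pckmo_subfunc() tests function-code bits in the PCKMO query block with test_bit_inv(), i.e. MSB-first numbering: bit 0 is the leftmost bit of byte 0, so the ECC codes 32..41 and HMAC codes 118/122 cannot be checked with a plain test_bit(). A standalone model of that inverted bit test (block size and helper name are ours):

	#include <stdio.h>
	#include <stdint.h>

	/* MSB-first bit test: bit 0 is the leftmost bit of byte 0,
	 * matching the s390 convention behind test_bit_inv(). */
	static int test_bit_inv_model(unsigned long nr, const uint8_t *block)
	{
		return (block[nr / 8] >> (7 - (nr % 8))) & 1;
	}

	int main(void)
	{
		uint8_t pckmo[16] = { 0 };	/* query block, size assumed */

		pckmo[32 / 8] |= 0x80 >> (32 % 8);	/* set function code 32 */
		printf("subfunc 32: %d\n", test_bit_inv_model(32, pckmo));
		printf("subfunc 33: %d\n", test_bit_inv_model(33, pckmo));
		return 0;
	}
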
3828 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3831 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3836 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3840 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3843 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3845 if (kvm_has_pckmo_hmac(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3849 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3872 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3875 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3888 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3890 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3898 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3900 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3902 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3904 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3907 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3909 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3920 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3924 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3926 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3941 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3955 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3956 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3961 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
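
kvm_s390_vcpu_setup() turns facility bits into SIE execution-control bits one test_kvm_facility() at a time, e.g. facility 73 (transactional execution), facility 129 (vector), and facility 8 combined with use_pfmfi for PFMF interpretation. A compressed model of that mapping; the flag values are invented, only the facility numbers come from the listing:

	#include <stdio.h>
	#include <stdint.h>

	/* Invented flag values; the real ECB/ECB2/ECB3 layouts differ. */
	#define ECB_MODEL_TE	0x01	/* facility 73: transactional execution */
	#define ECB_MODEL_VX	0x02	/* facility 129: vector */
	#define ECB_MODEL_PFMFI	0x04	/* facility 8 plus use_pfmfi */

	/* Facility bits are numbered MSB-first, as in the stfle result. */
	static int test_facility_model(const uint8_t *facs, unsigned int nr)
	{
		return (facs[nr / 8] >> (7 - (nr % 8))) & 1;
	}

	static uint8_t setup_ecb_model(const uint8_t *facs, int use_pfmfi)
	{
		uint8_t ecb = 0;

		if (test_facility_model(facs, 73))
			ecb |= ECB_MODEL_TE;
		if (test_facility_model(facs, 129))
			ecb |= ECB_MODEL_VX;
		if (test_facility_model(facs, 8) && use_pfmfi)
			ecb |= ECB_MODEL_PFMFI;
		return ecb;
	}

	int main(void)
	{
		uint8_t facs[32] = { 0 };

		facs[73 / 8] |= 0x80 >> (73 % 8);	/* offer facility 73 */
		printf("ecb=%#x\n", setup_ecb_model(facs, 1));
		return 0;
	}
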
3966 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_precreate() argument
3968 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) in kvm_arch_vcpu_precreate()
3992 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
4006 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
4008 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
4010 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
4012 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
4022 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
4028 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
4036 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
4040 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
4049 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4108 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
4120 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
4258 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4483 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4516 struct kvm *kvm = gmap->private; in __kvm_s390_fixup_fault_sync() local
4523 if (kvm_is_ucontrol(kvm)) { in __kvm_s390_fixup_fault_sync()
4528 vmaddr = gfn_to_hva(kvm, gfn); in __kvm_s390_fixup_fault_sync()
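
Unless the VM is ucontrol (userspace owns the address space), __kvm_s390_fixup_fault_sync() resolves the faulting frame with gfn_to_hva(). The translation behind that call, reduced to a single hypothetical memslot:

	#include <stdio.h>
	#include <stdint.h>

	struct memslot_model {
		uint64_t base_gfn;
		uint64_t npages;
		uint64_t userspace_addr;
	};

	/* One-slot view of gfn_to_hva(): the hva is the slot's userspace
	 * address plus the page offset of the gfn within the slot. */
	static uint64_t gfn_to_hva_model(const struct memslot_model *s,
					 uint64_t gfn)
	{
		if (gfn < s->base_gfn || gfn >= s->base_gfn + s->npages)
			return UINT64_MAX;	/* KVM_HVA_ERR_BAD stand-in */
		return s->userspace_addr + (gfn - s->base_gfn) * 4096;
	}

	int main(void)
	{
		struct memslot_model slot = {
			.base_gfn = 0x100, .npages = 0x100,
			.userspace_addr = 0x7f0000000000ULL,
		};

		printf("hva=%#llx\n",
		       (unsigned long long)gfn_to_hva_model(&slot, 0x180));
		return 0;
	}
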
4572 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_mprotect_notify_prefix()
4578 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_s390_mprotect_notify_prefix()
4648 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4649 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4660 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in __kvm_s390_set_tod_clock() argument
4670 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4671 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4672 if (test_kvm_facility(kvm, 139)) { in __kvm_s390_set_tod_clock()
4673 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4674 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4675 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4678 kvm_s390_vcpu_block_all(kvm); in __kvm_s390_set_tod_clock()
4679 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvm_s390_set_tod_clock()
4680 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4681 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
4684 kvm_s390_vcpu_unblock_all(kvm); in __kvm_s390_set_tod_clock()
4688 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in kvm_s390_try_set_tod_clock() argument
4690 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4692 __kvm_s390_set_tod_clock(kvm, gtod); in kvm_s390_try_set_tod_clock()
4693 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
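
kvm_s390_try_set_tod_clock() only trylocks kvm->lock (callers retry), then __kvm_s390_set_tod_clock() computes the guest epoch as a wrapping 64-bit difference, borrowing from the epoch index when facility 139 (multiple-epoch) is in use, and propagates both values to every blocked vCPU. The arithmetic in isolation, with abbreviated field names:

	#include <stdio.h>
	#include <stdint.h>

	struct tod_model { uint64_t tod; uint8_t ei; };	/* clock + epoch index */

	/* Guest epoch = guest TOD - host TOD, modulo 2^64; if that
	 * subtraction wrapped, the epoch-index difference loses a borrow.
	 * Mirrors the facility-139 branch shown above. */
	static void set_epoch_model(const struct tod_model *guest,
				    const struct tod_model *host,
				    uint64_t *epoch, uint8_t *epdx)
	{
		*epoch = guest->tod - host->tod;
		*epdx = guest->ei - host->ei;
		if (*epoch > guest->tod)	/* wrapped: host TOD was larger */
			*epdx -= 1;
	}

	int main(void)
	{
		struct tod_model guest = { .tod = 0x10, .ei = 1 };
		struct tod_model host  = { .tod = 0x20, .ei = 0 };
		uint64_t epoch;
		uint8_t epdx;

		set_epoch_model(&guest, &host, &epoch, &epdx);
		printf("epoch=%#llx epdx=%u\n",
		       (unsigned long long)epoch, (unsigned)epdx);
		return 0;
	}
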
4710 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4764 hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr); in kvm_arch_setup_async_pf()
4788 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4803 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4852 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm, in kvm_s390_assert_primary_as()
4916 scoped_guard(spinlock, &vcpu->kvm->mmu_lock) { in __kvm_s390_handle_dat_fault()
4917 kvm_release_faultin_page(vcpu->kvm, page, false, writable); in __kvm_s390_handle_dat_fault()
4929 if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_dat_fault_handler()
5008 KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx", in vcpu_post_run_handle_fault()
5150 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
5161 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
5170 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
5288 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5312 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5432 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
5437 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
5459 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5460 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5466 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5472 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5485 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5501 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5515 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5516 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5522 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5539 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5555 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
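
kvm_s390_vcpu_start() and kvm_s390_vcpu_stop() serialize on arch.start_stop_lock and count the vCPUs that are already running: a lone runnable vCPU gets IBS enabled as a fast path, and starting a second one disables IBS everywhere to flush outstanding enable requests. The counting decision alone, as a sketch:

	#include <stdio.h>

	enum ibs_action { IBS_ENABLE_SELF, IBS_DISABLE_ALL, IBS_NONE };

	/* Mirror of the started-vCPU bookkeeping in kvm_s390_vcpu_start();
	 * callers hold start_stop_lock, so the count cannot change here. */
	static enum ibs_action on_vcpu_start(const int *stopped,
					     int online_vcpus)
	{
		int started_vcpus = 0;

		for (int i = 0; i < online_vcpus; i++)
			if (!stopped[i])
				started_vcpus++;

		if (started_vcpus == 0)
			return IBS_ENABLE_SELF;	/* only active vCPU */
		if (started_vcpus == 1)
			return IBS_DISABLE_ALL;	/* a second vCPU starts */
		return IBS_NONE;
	}

	int main(void)
	{
		int stopped[3] = { 1, 1, 1 };

		printf("first start -> %d\n", on_vcpu_start(stopped, 3));
		stopped[0] = 0;
		printf("second start -> %d\n", on_vcpu_start(stopped, 3));
		return 0;
	}
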
5569 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5570 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5571 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5572 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5676 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5692 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5749 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5796 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5798 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5864 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5881 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5892 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5894 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5988 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5997 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) in kvm_arch_irqchip_in_kernel() argument
6003 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
6010 if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS) in kvm_arch_prepare_memory_region()
6014 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_prepare_memory_region()
6032 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
6036 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
6049 WARN(kvm_s390_vm_stop_migration(kvm), in kvm_arch_prepare_memory_region()
6055 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
6062 if (kvm_is_ucontrol(kvm)) in kvm_arch_commit_memory_region()
6067 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6071 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
6077 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
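
kvm_arch_prepare_memory_region() rejects slots whose end would exceed arch.mem_limit; kvm_arch_commit_memory_region() then mirrors the accepted change into the gmap, unmapping the old range on delete/move and mapping the new one on create/move. Both steps modeled with stand-in names:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE_MODEL 4096ULL

	enum change { CH_CREATE, CH_DELETE, CH_MOVE };

	/* Bounds check from kvm_arch_prepare_memory_region(): the slot
	 * must end at or below the per-VM memory limit. */
	static int prepare_region(uint64_t base_gfn, uint64_t size,
				  uint64_t mem_limit)
	{
		if (base_gfn * PAGE_SIZE_MODEL + size > mem_limit)
			return -1;	/* -EINVAL in the kernel */
		return 0;
	}

	/* Commit dispatch: delete/move unmaps the old range, create/move
	 * maps the new one (gmap_unmap_segment()/gmap_map_segment()
	 * stand-ins). */
	static void commit_region(enum change ch)
	{
		if (ch == CH_DELETE || ch == CH_MOVE)
			printf("gmap_unmap_segment(old)\n");
		if (ch == CH_CREATE || ch == CH_MOVE)
			printf("gmap_map_segment(new)\n");
	}

	int main(void)
	{
		printf("fits: %d\n", prepare_region(0, 1 << 20, 1ULL << 32));
		commit_region(CH_MOVE);
		return 0;
	}
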