--- kvm-s390.c (41be702a542a0d14bb0b1c16e824fa9ed27616ec)
+++ kvm-s390.c (c05c4186bbe4e99d64e8a36f7ca7f480da5d109f)
 /*
  * hosting zSeries kernel virtual machines
  *
  * Copyright IBM Corp. 2008, 2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
  * as published by the Free Software Foundation.

--- 143 unchanged lines hidden ---

 #ifdef CONFIG_KVM_S390_UCONTROL
         case KVM_CAP_S390_UCONTROL:
 #endif
         case KVM_CAP_SYNC_REGS:
         case KVM_CAP_ONE_REG:
         case KVM_CAP_ENABLE_CAP:
         case KVM_CAP_S390_CSS_SUPPORT:
         case KVM_CAP_IOEVENTFD:
+        case KVM_CAP_DEVICE_CTRL:
                 r = 1;
                 break;
         case KVM_CAP_NR_VCPUS:
         case KVM_CAP_MAX_VCPUS:
                 r = KVM_MAX_VCPUS;
                 break;
         case KVM_CAP_NR_MEMSLOTS:
                 r = KVM_USER_MEM_SLOTS;

--- 222 unchanged lines hidden ---
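The hunk above makes the capability switch report KVM_CAP_DEVICE_CTRL as available, i.e. the device control API can now be probed on s390. A minimal userspace sketch (not part of this diff) that checks the capability through the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        int main(void)
        {
                int kvm_fd = open("/dev/kvm", O_RDWR);
                if (kvm_fd < 0)
                        return 1;
                /* returns the value chosen in the capability switch above (1 = supported) */
                int r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL);
                printf("KVM_CAP_DEVICE_CTRL: %d\n", r);
                return 0;
        }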
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                     CPUSTAT_SM |
                                                     CPUSTAT_STOPPED |
                                                     CPUSTAT_GED);
         vcpu->arch.sie_block->ecb = 6;
+        if (test_vfacility(50) && test_vfacility(73))
+                vcpu->arch.sie_block->ecb |= 0x10;
+
         vcpu->arch.sie_block->ecb2 = 8;
         vcpu->arch.sie_block->eca = 0xC1002001U;
         vcpu->arch.sie_block->fac = (int) (long) vfacilities;
         hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
         tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                      (unsigned long) vcpu);
         vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
         get_cpu_id(&vcpu->arch.cpu_id);
         vcpu->arch.cpu_id.version = 0xff;
         return 0;
 }

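The two added lines in kvm_arch_vcpu_setup() set bit 0x10 in the SIE execution-control byte when host facilities 50 (constrained transactional execution) and 73 (transactional execution) are both usable, letting the guest use transactional execution. test_vfacility() checks the filtered facility list kept in the vfacilities page; an illustrative bit test (not the kernel's actual helper) would look like this, assuming STFLE numbering where facility 0 is the most-significant bit of the first doubleword:

        /* illustration only: test facility bit "nr" in an STFLE-style bit list */
        static inline int example_test_facility(const unsigned long *fac_list, int nr)
        {
                return (fac_list[nr / 64] >> (63 - (nr % 64))) & 1UL;
        }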
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                       unsigned int id)
 {
         struct kvm_vcpu *vcpu;
+        struct sie_page *sie_page;
         int rc = -EINVAL;

         if (id >= KVM_MAX_VCPUS)
                 goto out;

         rc = -ENOMEM;

         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
         if (!vcpu)
                 goto out;

-        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
-                                        get_zeroed_page(GFP_KERNEL);
-
-        if (!vcpu->arch.sie_block)
+        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
+        if (!sie_page)
                 goto out_free_cpu;

+        vcpu->arch.sie_block = &sie_page->sie_block;
+        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+
         vcpu->arch.sie_block->icpua = id;
         if (!kvm_is_ucontrol(kvm)) {
                 if (!kvm->arch.sca) {
                         WARN_ON_ONCE(1);
                         goto out_free_cpu;
                 }
                 if (!kvm->arch.sca->cpu[id].sda)
                         kvm->arch.sca->cpu[id].sda =

--- 288 unchanged lines hidden ---
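With this change the SIE control block is no longer a bare zeroed page: it is allocated as part of a struct sie_page that also carries the interception transaction diagnostic block (ITDB), and the block's itdba field is pointed at that ITDB so the hardware has somewhere to record transaction-abort details. A rough sketch of the layout this implies (padding size is an assumption, not taken from this diff; the real definition lives in the s390 KVM headers):

        /* sketch only -- the gap before the ITDB is assumed */
        struct sie_page_sketch {
                struct kvm_s390_sie_block sie_block;    /* hardware control block */
                __u8 reserved[1024];                    /* assumed gap up to the ITDB */
                __u8 itdb[256];                         /* transaction diagnostic block */
        };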
         int rc;

         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                    vcpu->arch.sie_block->icptcode);
         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

         if (exit_reason >= 0) {
                 rc = 0;
+        } else if (kvm_is_ucontrol(vcpu->kvm)) {
+                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
+                vcpu->run->s390_ucontrol.trans_exc_code =
+                                                current->thread.gmap_addr;
+                vcpu->run->s390_ucontrol.pgm_code = 0x10;
+                rc = -EREMOTE;
         } else {
-                if (kvm_is_ucontrol(vcpu->kvm)) {
-                        rc = SIE_INTERCEPT_UCONTROL;
-                } else {
-                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
-                        trace_kvm_s390_sie_fault(vcpu);
-                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                }
+                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+                trace_kvm_s390_sie_fault(vcpu);
+                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
         }

         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

         if (rc == 0) {
                 if (kvm_is_ucontrol(vcpu->kvm))
                         rc = -EOPNOTSUPP;
                 else

--- 77 unchanged lines hidden ---

         might_fault();
         rc = __vcpu_run(vcpu);

         if (signal_pending(current) && !rc) {
                 kvm_run->exit_reason = KVM_EXIT_INTR;
                 rc = -EINTR;
         }

-#ifdef CONFIG_KVM_S390_UCONTROL
-        if (rc == SIE_INTERCEPT_UCONTROL) {
-                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
-                kvm_run->s390_ucontrol.trans_exc_code =
-                                                current->thread.gmap_addr;
-                kvm_run->s390_ucontrol.pgm_code = 0x10;
-                rc = 0;
-        }
-#endif
-
         if (rc == -EOPNOTSUPP) {
                 /* intercept cannot be handled in-kernel, prepare kvm-run */
                 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                 rc = 0;
         }

--- 26 unchanged lines hidden ---
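Taken together, the hunks above rework how a host page fault during SIE is reported for user-controlled ("ucontrol") guests: instead of returning the internal SIE_INTERCEPT_UCONTROL code and translating it at the bottom of kvm_arch_vcpu_ioctl_run(), the exit path now fills in the KVM_EXIT_S390_UCONTROL exit itself and returns -EREMOTE, so the CONFIG_KVM_S390_UCONTROL block in the run ioctl can be dropped. A hedged userspace sketch (not from this diff) of consuming that exit; handle_gmap_fault() is a hypothetical VMM helper:

        #include <linux/kvm.h>

        static int handle_gmap_fault(unsigned long gaddr);      /* hypothetical */

        static int handle_exit(struct kvm_run *run)
        {
                if (run->exit_reason == KVM_EXIT_S390_UCONTROL)
                        /* trans_exc_code carries the faulting gmap address */
                        return handle_gmap_fault(run->s390_ucontrol.trans_exc_code);
                return 0;
        }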
 }

 /*
  * store status at address
  * we use have two special cases:
  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
  */
-int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
 {
         unsigned char archmode = 1;
         int prefix;
+        u64 clkcomp;

         if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                         return -EFAULT;
                 addr = SAVE_AREA_BASE;
                 prefix = 0;
         } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                         return -EFAULT;
                 addr = SAVE_AREA_BASE;
                 prefix = 1;
         } else
                 prefix = 0;

-        /*
-         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
-         * copying in vcpu load/put. Lets update our copies before we save
-         * it into the save area
-         */
-        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
-        save_access_regs(vcpu->run->s.regs.acrs);
-
         if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                         vcpu->arch.guest_fpregs.fprs, 128, prefix))
                 return -EFAULT;

         if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                         vcpu->run->s.regs.gprs, 128, prefix))
                 return -EFAULT;

--- 13 unchanged lines hidden ---

         if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                         &vcpu->arch.sie_block->todpr, 4, prefix))
                 return -EFAULT;

         if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                         &vcpu->arch.sie_block->cputm, 8, prefix))
                 return -EFAULT;

+        clkcomp = vcpu->arch.sie_block->ckc >> 8;
         if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
-                        &vcpu->arch.sie_block->ckc, 8, prefix))
+                        &clkcomp, 8, prefix))
                 return -EFAULT;

         if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                         &vcpu->run->s.regs.acrs, 64, prefix))
                 return -EFAULT;

         if (__guestcopy(vcpu,
                         addr + offsetof(struct save_area, ctrl_regs),
                         &vcpu->arch.sie_block->gcr, 128, prefix))
                 return -EFAULT;
         return 0;
 }

+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+        /*
+         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
+         * copying in vcpu load/put. Lets update our copies before we save
+         * it into the save area
+         */
+        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+        save_access_regs(vcpu->run->s.regs.acrs);
+
+        return kvm_s390_store_status_unloaded(vcpu, addr);
+}
+
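The store-status path is now split in two: kvm_s390_store_status_unloaded() writes the architected save area and assumes the guest registers are already up to date, while the new kvm_s390_vcpu_store_status() wrapper keeps the old behaviour of first syncing the lazily loaded FP control, FP and access registers. A caller that is not running in the target vcpu's own context can therefore store the state directly, roughly like this (hypothetical call site):

        /* e.g. while handling a stop-and-store-status order for another vcpu */
        rc = kvm_s390_store_status_unloaded(dst_vcpu, KVM_S390_STORE_STATUS_NOADDR);

Note also the clock-comparator change folded into the same function: the value is now stored shifted right by eight bits (the clkcomp temporary) instead of the raw register image, which appears to match the architected save-area format, where only the leftmost bits of the clock comparator are stored.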
|
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                      struct kvm_enable_cap *cap)
 {
         int r;

         if (cap->flags)
                 return -EINVAL;

--- 211 unchanged lines hidden ---

          * only set facilities that are known to work in KVM.
          */
         vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
         if (!vfacilities) {
                 kvm_exit();
                 return -ENOMEM;
         }
         memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
-        vfacilities[0] &= 0xff82fff3f47c0000UL;
-        vfacilities[1] &= 0x001c000000000000UL;
+        vfacilities[0] &= 0xff82fff3f4fc2000UL;
+        vfacilities[1] &= 0x005c000000000000UL;
         return 0;
 }

 static void __exit kvm_s390_exit(void)
 {
         free_page((unsigned long) vfacilities);
         kvm_exit();
 }

--- 12 unchanged lines hidden ---
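The widened vfacilities masks in the module init path are what actually expose the new facilities to guests: relative to the old masks, the new values additionally allow facility bits 40, 50 and 73 (50 and 73 being the constrained and normal transactional-execution facilities tested in kvm_arch_vcpu_setup() above). A small stand-alone sketch (not part of the diff) that decodes which facility numbers two such masks differ in:

        #include <stdio.h>

        /* facility N sits in doubleword N/64, counted from the most-significant bit */
        static void diff_facilities(unsigned long long oldm, unsigned long long newm, int base)
        {
                unsigned long long delta = oldm ^ newm;
                for (int bit = 63; bit >= 0; bit--)
                        if (delta & (1ULL << bit))
                                printf("facility %d\n", base + (63 - bit));
        }

        int main(void)
        {
                diff_facilities(0xff82fff3f47c0000ULL, 0xff82fff3f4fc2000ULL, 0);       /* 40, 50 */
                diff_facilities(0x001c000000000000ULL, 0x005c000000000000ULL, 64);     /* 73 */
                return 0;
        }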