/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

/* Diagnose 0x10: release guest pages so the host may discard them. */
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
        unsigned long start, end;
        unsigned long prefix = kvm_s390_get_prefix(vcpu);

        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;

        if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
            || start < 2 * PAGE_SIZE)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
        vcpu->stat.diagnose_10++;

        /*
         * We checked for start >= end above, so let's check for the
         * fast path (no prefix swap page involved).
         */
        if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
                gmap_discard(vcpu->arch.gmap, start, end);
        } else {
                /*
                 * This is the slow path. gmap_discard checks start against
                 * end itself, so let's split this into before prefix,
                 * prefix and after prefix, and let gmap_discard make some
                 * of these calls NOPs.
                 */
                gmap_discard(vcpu->arch.gmap, start, prefix);
                if (start <= prefix)
                        gmap_discard(vcpu->arch.gmap, 0, 4096);
                if (end > prefix + 4096)
                        gmap_discard(vcpu->arch.gmap, 4096, 8192);
                gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
        }
        return 0;
}

/*
 * Diagnose 0x258: page-reference services, used to establish and cancel
 * the pfault (pseudo-page-fault) handshake token.
 */
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
        struct prs_parm {
                u16 code;
                u16 subcode;
                u16 parm_len;
                u16 parm_version;
                u64 token_addr;
                u64 select_mask;
                u64 compare_mask;
                u64 zarch;
        };
        struct prs_parm parm;
        int rc;
        u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
        u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

        if (vcpu->run->s.regs.gprs[rx] & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (parm.subcode) {
        case 0: /* TOKEN */
                if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
                        /*
                         * If the pagefault handshake is already activated,
                         * the token must not be changed.  We have to return
                         * decimal 8 instead, as mandated in SC24-6084.
                         */
                        vcpu->run->s.regs.gprs[ry] = 8;
                        return 0;
                }

                if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
                    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

                if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                vcpu->arch.pfault_token = parm.token_addr;
                vcpu->arch.pfault_select = parm.select_mask;
                vcpu->arch.pfault_compare = parm.compare_mask;
                vcpu->run->s.regs.gprs[ry] = 0;
                rc = 0;
                break;
        case 1: /*
                 * CANCEL
                 * The specification allows already pending tokens to survive
                 * the cancel; to reduce code complexity, we therefore assume
                 * that all outstanding tokens are already pending.
                 */
                if (parm.token_addr || parm.select_mask ||
                    parm.compare_mask || parm.zarch)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

                vcpu->run->s.regs.gprs[ry] = 0;
                /*
                 * If pfault handling was not established or is already
                 * canceled, SC24-6084 requests us to return decimal 4.
                 */
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        vcpu->run->s.regs.gprs[ry] = 4;
                else
                        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

                rc = 0;
                break;
        default:
                rc = -EOPNOTSUPP;
                break;
        }

        return rc;
}

/* Diagnose 0x44: voluntary time slice end. */
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
        vcpu->stat.diagnose_44++;
        kvm_vcpu_on_spin(vcpu);
        return 0;
}

/* Diagnose 0x9c: directed time slice end (yield to a target vcpu). */
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tcpu;
        int tid;
        int i;

        tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        vcpu->stat.diagnose_9c++;
        VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

        if (tid == vcpu->vcpu_id)
                return 0;

        kvm_for_each_vcpu(i, tcpu, kvm)
                if (tcpu->vcpu_id == tid) {
                        kvm_vcpu_yield_to(tcpu);
                        break;
                }

        return 0;
}

/* Diagnose 0x308: re-ipl functions, forwarded to userspace as a reset request. */
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
        unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
        unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

        VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
        switch (subcode) {
        case 3:
                vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
                break;
        case 4:
                vcpu->run->s390_reset_flags = 0;
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
        vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
        VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
                   vcpu->run->s390_reset_flags);
        trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
        return -EREMOTE;
}

/* Diagnose 0x500: virtio-ccw notification hypercall. */
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
        int ret;

        /* No virtio-ccw notification? Get out quickly. */
        if (!vcpu->kvm->arch.css_support ||
            (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
                return -EOPNOTSUPP;

        /*
         * The layout is as follows:
         * - gpr 2 contains the subchannel id (passed as addr)
         * - gpr 3 contains the virtqueue index (passed as datamatch)
         * - gpr 4 contains the index on the bus (optionally)
         */
        ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
                                      vcpu->run->s.regs.gprs[2] & 0xffffffff,
                                      8, &vcpu->run->s.regs.gprs[3],
                                      vcpu->run->s.regs.gprs[4]);

        /*
         * Return cookie in gpr 2, but don't overwrite the register if the
         * diagnose will be handled by userspace.
         */
        if (ret != -EOPNOTSUPP)
                vcpu->run->s.regs.gprs[2] = ret;
        /* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
        return ret < 0 ? ret : 0;
}

int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
        int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        trace_kvm_s390_handle_diag(vcpu, code);
        switch (code) {
        case 0x10:
                return diag_release_pages(vcpu);
        case 0x44:
                return __diag_time_slice_end(vcpu);
        case 0x9c:
                return __diag_time_slice_end_directed(vcpu);
        case 0x258:
                return __diag_page_ref_service(vcpu);
        case 0x308:
                return __diag_ipl_functions(vcpu);
        case 0x500:
                return __diag_virtio_hypercall(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}
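
/*
 * Guest-side usage sketch for __diag_virtio_hypercall() above; this is an
 * illustration only and not part of this file.  A virtio-ccw guest driver
 * notifies the host about a virtqueue kick roughly as follows, loading the
 * registers described in the layout comment and issuing diagnose 0x500.
 * The variable names (schid, vq_index, cookie) are hypothetical:
 *
 *      register unsigned long nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
 *      register unsigned long sid asm("2") = schid;
 *      register unsigned long idx asm("3") = vq_index;
 *      register long data asm("4") = cookie;
 *      register long rc asm("2");
 *
 *      asm volatile ("diag 2,4,0x500\n"
 *                    : "=d" (rc)
 *                    : "d" (nr), "d" (sid), "d" (idx), "d" (data)
 *                    : "memory", "cc");
 *
 * The value returned in gpr 2 is the cookie; passing it back on the next
 * notification lets kvm_io_bus_write_cookie() find the matching device
 * without a full bus search.
 */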