// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;
	int ext_call_pending;

	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
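
/*
 * A STOP order is forwarded as a local stop interrupt. A stop already
 * pending on the destination makes the injection return -EBUSY, which
 * is reported to the caller as condition code 2 (busy).
 */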
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
			   u64 *status_reg)
{
	unsigned int i;
	struct kvm_vcpu *v;
	bool all_stopped = true;

	kvm_for_each_vcpu(i, v, vcpu->kvm) {
		if (v == vcpu)
			continue;
		if (!is_vcpu_stopped(v))
			all_stopped = false;
	}

	*status_reg &= 0xffffffff00000000UL;

	/* Reject set arch order, with czam we're always in z/Arch mode. */
	*status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER :
					SIGP_STATUS_INCORRECT_STATE);
	return SIGP_CC_STATUS_STORED;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	return rc;
}
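
/*
 * STORE STATUS AT ADDRESS requires the destination CPU to be stopped.
 * The address is aligned down to a 512-byte boundary before the status
 * area is written; a fault while storing is reported to the caller as
 * an invalid parameter.
 */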
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (!test_kvm_facility(vcpu->kvm, 9)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_ORDER;
		return SIGP_CC_STATUS_STORED;
	}

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}
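
/*
 * Dispatch an order that targets a specific CPU. Returns a SIGP
 * condition code, -EOPNOTSUPP when the order has to be completed in
 * user space, or a negative error code from interrupt injection.
 */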
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}

static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}
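
/*
 * SIGP is an RS-format instruction: the CPU address is taken from the
 * register designated by R3 and the order code from the second-operand
 * address. The 32-bit parameter lives in gpr R1 when R1 is odd,
 * otherwise in gpr R1 + 1; status, when stored, goes to the low 32
 * bits of gpr R1.
 */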
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* SIGP is privileged; executing it in problem state faults */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}