1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * 4 * Copyright IBM Corp. 2007 5 * 6 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 7 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 8 */ 9 10 #include <linux/errno.h> 11 #include <linux/err.h> 12 #include <linux/kvm_host.h> 13 #include <linux/vmalloc.h> 14 #include <linux/hrtimer.h> 15 #include <linux/sched/signal.h> 16 #include <linux/fs.h> 17 #include <linux/slab.h> 18 #include <linux/file.h> 19 #include <linux/module.h> 20 #include <linux/irqbypass.h> 21 #include <linux/kvm_irqfd.h> 22 #include <linux/of.h> 23 #include <asm/cputable.h> 24 #include <linux/uaccess.h> 25 #include <asm/kvm_ppc.h> 26 #include <asm/cputhreads.h> 27 #include <asm/irqflags.h> 28 #include <asm/iommu.h> 29 #include <asm/switch_to.h> 30 #include <asm/xive.h> 31 #ifdef CONFIG_PPC_PSERIES 32 #include <asm/hvcall.h> 33 #include <asm/plpar_wrappers.h> 34 #endif 35 #include <asm/ultravisor.h> 36 #include <asm/setup.h> 37 38 #include "timing.h" 39 #include "../mm/mmu_decl.h" 40 41 #define CREATE_TRACE_POINTS 42 #include "trace.h" 43 44 struct kvmppc_ops *kvmppc_hv_ops; 45 EXPORT_SYMBOL_GPL(kvmppc_hv_ops); 46 struct kvmppc_ops *kvmppc_pr_ops; 47 EXPORT_SYMBOL_GPL(kvmppc_pr_ops); 48 49 50 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 51 { 52 return !!(v->arch.pending_exceptions) || kvm_request_pending(v); 53 } 54 55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 56 { 57 return kvm_arch_vcpu_runnable(vcpu); 58 } 59 60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 61 { 62 return false; 63 } 64 65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 66 { 67 return 1; 68 } 69 70 /* 71 * Common checks before entering the guest world. Call with interrupts 72 * disabled. 73 * 74 * returns: 75 * 76 * == 1 if we're ready to go into guest state 77 * <= 0 if we need to go back to the host with return value 78 */ 79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) 80 { 81 int r; 82 83 WARN_ON(irqs_disabled()); 84 hard_irq_disable(); 85 86 while (true) { 87 if (need_resched()) { 88 local_irq_enable(); 89 cond_resched(); 90 hard_irq_disable(); 91 continue; 92 } 93 94 if (signal_pending(current)) { 95 kvmppc_account_exit(vcpu, SIGNAL_EXITS); 96 vcpu->run->exit_reason = KVM_EXIT_INTR; 97 r = -EINTR; 98 break; 99 } 100 101 vcpu->mode = IN_GUEST_MODE; 102 103 /* 104 * Reading vcpu->requests must happen after setting vcpu->mode, 105 * so we don't miss a request because the requester sees 106 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests 107 * before next entering the guest (and thus doesn't IPI). 108 * This also orders the write to mode from any reads 109 * to the page tables done while the VCPU is running. 110 * Please see the comment in kvm_flush_remote_tlbs. 
111 */ 112 smp_mb(); 113 114 if (kvm_request_pending(vcpu)) { 115 /* Make sure we process requests preemptable */ 116 local_irq_enable(); 117 trace_kvm_check_requests(vcpu); 118 r = kvmppc_core_check_requests(vcpu); 119 hard_irq_disable(); 120 if (r > 0) 121 continue; 122 break; 123 } 124 125 if (kvmppc_core_prepare_to_enter(vcpu)) { 126 /* interrupts got enabled in between, so we 127 are back at square 1 */ 128 continue; 129 } 130 131 guest_enter_irqoff(); 132 return 1; 133 } 134 135 /* return to host */ 136 local_irq_enable(); 137 return r; 138 } 139 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); 140 141 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) 143 { 144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; 145 int i; 146 147 shared->sprg0 = swab64(shared->sprg0); 148 shared->sprg1 = swab64(shared->sprg1); 149 shared->sprg2 = swab64(shared->sprg2); 150 shared->sprg3 = swab64(shared->sprg3); 151 shared->srr0 = swab64(shared->srr0); 152 shared->srr1 = swab64(shared->srr1); 153 shared->dar = swab64(shared->dar); 154 shared->msr = swab64(shared->msr); 155 shared->dsisr = swab32(shared->dsisr); 156 shared->int_pending = swab32(shared->int_pending); 157 for (i = 0; i < ARRAY_SIZE(shared->sr); i++) 158 shared->sr[i] = swab32(shared->sr[i]); 159 } 160 #endif 161 162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 163 { 164 int nr = kvmppc_get_gpr(vcpu, 11); 165 int r; 166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); 167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); 168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); 169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); 170 unsigned long r2 = 0; 171 172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { 173 /* 32 bit mode */ 174 param1 &= 0xffffffff; 175 param2 &= 0xffffffff; 176 param3 &= 0xffffffff; 177 param4 &= 0xffffffff; 178 } 179 180 switch (nr) { 181 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): 182 { 183 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) 184 /* Book3S can be little endian, find it out here */ 185 int shared_big_endian = true; 186 if (vcpu->arch.intr_msr & MSR_LE) 187 shared_big_endian = false; 188 if (shared_big_endian != vcpu->arch.shared_big_endian) 189 kvmppc_swab_shared(vcpu); 190 vcpu->arch.shared_big_endian = shared_big_endian; 191 #endif 192 193 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { 194 /* 195 * Older versions of the Linux magic page code had 196 * a bug where they would map their trampoline code 197 * NX. If that's the case, remove !PR NX capability. 198 */ 199 vcpu->arch.disable_kernel_nx = true; 200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 201 } 202 203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; 204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; 205 206 #ifdef CONFIG_PPC_64K_PAGES 207 /* 208 * Make sure our 4k magic page is in the same window of a 64k 209 * page within the guest and within the host's page. 
210 */ 211 if ((vcpu->arch.magic_page_pa & 0xf000) != 212 ((ulong)vcpu->arch.shared & 0xf000)) { 213 void *old_shared = vcpu->arch.shared; 214 ulong shared = (ulong)vcpu->arch.shared; 215 void *new_shared; 216 217 shared &= PAGE_MASK; 218 shared |= vcpu->arch.magic_page_pa & 0xf000; 219 new_shared = (void*)shared; 220 memcpy(new_shared, old_shared, 0x1000); 221 vcpu->arch.shared = new_shared; 222 } 223 #endif 224 225 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; 226 227 r = EV_SUCCESS; 228 break; 229 } 230 case KVM_HCALL_TOKEN(KVM_HC_FEATURES): 231 r = EV_SUCCESS; 232 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 233 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 234 #endif 235 236 /* Second return value is in r4 */ 237 break; 238 case EV_HCALL_TOKEN(EV_IDLE): 239 r = EV_SUCCESS; 240 kvm_vcpu_halt(vcpu); 241 break; 242 default: 243 r = EV_UNIMPLEMENTED; 244 break; 245 } 246 247 kvmppc_set_gpr(vcpu, 4, r2); 248 249 return r; 250 } 251 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv); 252 253 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) 254 { 255 int r = false; 256 257 /* We have to know what CPU to virtualize */ 258 if (!vcpu->arch.pvr) 259 goto out; 260 261 /* PAPR only works with book3s_64 */ 262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) 263 goto out; 264 265 /* HV KVM can only do PAPR mode for now */ 266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) 267 goto out; 268 269 #ifdef CONFIG_KVM_BOOKE_HV 270 if (!cpu_has_feature(CPU_FTR_EMB_HV)) 271 goto out; 272 #endif 273 274 r = true; 275 276 out: 277 vcpu->arch.sane = r; 278 return r ? 0 : -EINVAL; 279 } 280 EXPORT_SYMBOL_GPL(kvmppc_sanity_check); 281 282 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) 283 { 284 enum emulation_result er; 285 int r; 286 287 er = kvmppc_emulate_loadstore(vcpu); 288 switch (er) { 289 case EMULATE_DONE: 290 /* Future optimization: only reload non-volatiles if they were 291 * actually modified. */ 292 r = RESUME_GUEST_NV; 293 break; 294 case EMULATE_AGAIN: 295 r = RESUME_GUEST; 296 break; 297 case EMULATE_DO_MMIO: 298 vcpu->run->exit_reason = KVM_EXIT_MMIO; 299 /* We must reload nonvolatiles because "update" load/store 300 * instructions modify register state. */ 301 /* Future optimization: only reload non-volatiles if they were 302 * actually modified. */ 303 r = RESUME_HOST_NV; 304 break; 305 case EMULATE_FAIL: 306 { 307 ppc_inst_t last_inst; 308 309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 310 kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n", 311 ppc_inst_val(last_inst)); 312 313 /* 314 * Injecting a Data Storage here is a bit more 315 * accurate since the instruction that caused the 316 * access could still be a valid one. 317 */ 318 if (!IS_ENABLED(CONFIG_BOOKE)) { 319 ulong dsisr = DSISR_BADACCESS; 320 321 if (vcpu->mmio_is_write) 322 dsisr |= DSISR_ISSTORE; 323 324 kvmppc_core_queue_data_storage(vcpu, 325 kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 326 vcpu->arch.vaddr_accessed, dsisr); 327 } else { 328 /* 329 * BookE does not send a SIGBUS on a bad 330 * fault, so use a Program interrupt instead 331 * to avoid a fault loop. 
332 */ 333 kvmppc_core_queue_program(vcpu, 0); 334 } 335 336 r = RESUME_GUEST; 337 break; 338 } 339 default: 340 WARN_ON(1); 341 r = RESUME_GUEST; 342 } 343 344 return r; 345 } 346 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); 347 348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, 349 bool data) 350 { 351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; 352 struct kvmppc_pte pte; 353 int r = -EINVAL; 354 355 vcpu->stat.st++; 356 357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) 358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, 359 size); 360 361 if ((!r) || (r == -EAGAIN)) 362 return r; 363 364 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, 365 XLATE_WRITE, &pte); 366 if (r < 0) 367 return r; 368 369 *eaddr = pte.raddr; 370 371 if (!pte.may_write) 372 return -EPERM; 373 374 /* Magic page override */ 375 if (kvmppc_supports_magic_page(vcpu) && mp_pa && 376 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) && 377 !(kvmppc_get_msr(vcpu) & MSR_PR)) { 378 void *magic = vcpu->arch.shared; 379 magic += pte.eaddr & 0xfff; 380 memcpy(magic, ptr, size); 381 return EMULATE_DONE; 382 } 383 384 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) 385 return EMULATE_DO_MMIO; 386 387 return EMULATE_DONE; 388 } 389 EXPORT_SYMBOL_GPL(kvmppc_st); 390 391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, 392 bool data) 393 { 394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; 395 struct kvmppc_pte pte; 396 int rc = -EINVAL; 397 398 vcpu->stat.ld++; 399 400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) 401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, 402 size); 403 404 if ((!rc) || (rc == -EAGAIN)) 405 return rc; 406 407 rc = kvmppc_xlate(vcpu, *eaddr, data ? 
XLATE_DATA : XLATE_INST, 408 XLATE_READ, &pte); 409 if (rc) 410 return rc; 411 412 *eaddr = pte.raddr; 413 414 if (!pte.may_read) 415 return -EPERM; 416 417 if (!data && !pte.may_execute) 418 return -ENOEXEC; 419 420 /* Magic page override */ 421 if (kvmppc_supports_magic_page(vcpu) && mp_pa && 422 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) && 423 !(kvmppc_get_msr(vcpu) & MSR_PR)) { 424 void *magic = vcpu->arch.shared; 425 magic += pte.eaddr & 0xfff; 426 memcpy(ptr, magic, size); 427 return EMULATE_DONE; 428 } 429 430 kvm_vcpu_srcu_read_lock(vcpu); 431 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); 432 kvm_vcpu_srcu_read_unlock(vcpu); 433 if (rc) 434 return EMULATE_DO_MMIO; 435 436 return EMULATE_DONE; 437 } 438 EXPORT_SYMBOL_GPL(kvmppc_ld); 439 440 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 441 { 442 struct kvmppc_ops *kvm_ops = NULL; 443 int r; 444 445 /* 446 * if we have both HV and PR enabled, default is HV 447 */ 448 if (type == 0) { 449 if (kvmppc_hv_ops) 450 kvm_ops = kvmppc_hv_ops; 451 else 452 kvm_ops = kvmppc_pr_ops; 453 if (!kvm_ops) 454 goto err_out; 455 } else if (type == KVM_VM_PPC_HV) { 456 if (!kvmppc_hv_ops) 457 goto err_out; 458 kvm_ops = kvmppc_hv_ops; 459 } else if (type == KVM_VM_PPC_PR) { 460 if (!kvmppc_pr_ops) 461 goto err_out; 462 kvm_ops = kvmppc_pr_ops; 463 } else 464 goto err_out; 465 466 if (!try_module_get(kvm_ops->owner)) 467 return -ENOENT; 468 469 kvm->arch.kvm_ops = kvm_ops; 470 r = kvmppc_core_init_vm(kvm); 471 if (r) 472 module_put(kvm_ops->owner); 473 return r; 474 err_out: 475 return -EINVAL; 476 } 477 478 void kvm_arch_destroy_vm(struct kvm *kvm) 479 { 480 #ifdef CONFIG_KVM_XICS 481 /* 482 * We call kick_all_cpus_sync() to ensure that all 483 * CPUs have executed any pending IPIs before we 484 * continue and free VCPUs structures below. 485 */ 486 if (is_kvmppc_hv_enabled(kvm)) 487 kick_all_cpus_sync(); 488 #endif 489 490 kvm_destroy_vcpus(kvm); 491 492 mutex_lock(&kvm->lock); 493 494 kvmppc_core_destroy_vm(kvm); 495 496 mutex_unlock(&kvm->lock); 497 498 /* drop the module reference */ 499 module_put(kvm->arch.kvm_ops->owner); 500 } 501 502 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 503 { 504 int r; 505 /* Assume we're using HV mode when the HV module is loaded */ 506 int hv_enabled = kvmppc_hv_ops ? 1 : 0; 507 508 if (kvm) { 509 /* 510 * Hooray - we know which VM type we're running on. Depend on 511 * that rather than the guess above. 
512 */ 513 hv_enabled = is_kvmppc_hv_enabled(kvm); 514 } 515 516 switch (ext) { 517 #ifdef CONFIG_BOOKE 518 case KVM_CAP_PPC_BOOKE_SREGS: 519 case KVM_CAP_PPC_BOOKE_WATCHDOG: 520 case KVM_CAP_PPC_EPR: 521 #else 522 case KVM_CAP_PPC_SEGSTATE: 523 case KVM_CAP_PPC_HIOR: 524 case KVM_CAP_PPC_PAPR: 525 #endif 526 case KVM_CAP_PPC_UNSET_IRQ: 527 case KVM_CAP_PPC_IRQ_LEVEL: 528 case KVM_CAP_ENABLE_CAP: 529 case KVM_CAP_ONE_REG: 530 case KVM_CAP_IOEVENTFD: 531 case KVM_CAP_IMMEDIATE_EXIT: 532 case KVM_CAP_SET_GUEST_DEBUG: 533 r = 1; 534 break; 535 case KVM_CAP_PPC_GUEST_DEBUG_SSTEP: 536 case KVM_CAP_PPC_PAIRED_SINGLES: 537 case KVM_CAP_PPC_OSI: 538 case KVM_CAP_PPC_GET_PVINFO: 539 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 540 case KVM_CAP_SW_TLB: 541 #endif 542 /* We support this only for PR */ 543 r = !hv_enabled; 544 break; 545 #ifdef CONFIG_KVM_MPIC 546 case KVM_CAP_IRQ_MPIC: 547 r = 1; 548 break; 549 #endif 550 551 #ifdef CONFIG_PPC_BOOK3S_64 552 case KVM_CAP_SPAPR_TCE: 553 fallthrough; 554 case KVM_CAP_SPAPR_TCE_64: 555 case KVM_CAP_SPAPR_TCE_VFIO: 556 case KVM_CAP_PPC_RTAS: 557 case KVM_CAP_PPC_FIXUP_HCALL: 558 case KVM_CAP_PPC_ENABLE_HCALL: 559 #ifdef CONFIG_KVM_XICS 560 case KVM_CAP_IRQ_XICS: 561 #endif 562 case KVM_CAP_PPC_GET_CPU_CHAR: 563 r = 1; 564 break; 565 #ifdef CONFIG_KVM_XIVE 566 case KVM_CAP_PPC_IRQ_XIVE: 567 /* 568 * We need XIVE to be enabled on the platform (implies 569 * a POWER9 processor) and the PowerNV platform, as 570 * nested is not yet supported. 571 */ 572 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) && 573 kvmppc_xive_native_supported(); 574 break; 575 #endif 576 577 #ifdef CONFIG_HAVE_KVM_IRQCHIP 578 case KVM_CAP_IRQFD_RESAMPLE: 579 r = !xive_enabled(); 580 break; 581 #endif 582 583 case KVM_CAP_PPC_ALLOC_HTAB: 584 r = hv_enabled; 585 break; 586 #endif /* CONFIG_PPC_BOOK3S_64 */ 587 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 588 case KVM_CAP_PPC_SMT: 589 r = 0; 590 if (kvm) { 591 if (kvm->arch.emul_smt_mode > 1) 592 r = kvm->arch.emul_smt_mode; 593 else 594 r = kvm->arch.smt_mode; 595 } else if (hv_enabled) { 596 if (cpu_has_feature(CPU_FTR_ARCH_300)) 597 r = 1; 598 else 599 r = threads_per_subcore; 600 } 601 break; 602 case KVM_CAP_PPC_SMT_POSSIBLE: 603 r = 1; 604 if (hv_enabled) { 605 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 606 r = ((threads_per_subcore << 1) - 1); 607 else 608 /* P9 can emulate dbells, so allow any mode */ 609 r = 8 | 4 | 2 | 1; 610 } 611 break; 612 case KVM_CAP_PPC_HWRNG: 613 r = kvmppc_hwrng_present(); 614 break; 615 case KVM_CAP_PPC_MMU_RADIX: 616 r = !!(hv_enabled && radix_enabled()); 617 break; 618 case KVM_CAP_PPC_MMU_HASH_V3: 619 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible && 620 kvmppc_hv_ops->hash_v3_possible()); 621 break; 622 case KVM_CAP_PPC_NESTED_HV: 623 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && 624 !kvmppc_hv_ops->enable_nested(NULL)); 625 break; 626 #endif 627 case KVM_CAP_SYNC_MMU: 628 BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER)); 629 r = 1; 630 break; 631 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 632 case KVM_CAP_PPC_HTAB_FD: 633 r = hv_enabled; 634 break; 635 #endif 636 case KVM_CAP_NR_VCPUS: 637 /* 638 * Recommending a number of CPUs is somewhat arbitrary; we 639 * return the number of present CPUs for -HV (since a host 640 * will have secondary threads "offline"), and for other KVM 641 * implementations just count online CPUs. 
642 */ 643 if (hv_enabled) 644 r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS); 645 else 646 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 647 break; 648 case KVM_CAP_MAX_VCPUS: 649 r = KVM_MAX_VCPUS; 650 break; 651 case KVM_CAP_MAX_VCPU_ID: 652 r = KVM_MAX_VCPU_IDS; 653 break; 654 #ifdef CONFIG_PPC_BOOK3S_64 655 case KVM_CAP_PPC_GET_SMMU_INFO: 656 r = 1; 657 break; 658 case KVM_CAP_SPAPR_MULTITCE: 659 r = 1; 660 break; 661 case KVM_CAP_SPAPR_RESIZE_HPT: 662 r = !!hv_enabled; 663 break; 664 #endif 665 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 666 case KVM_CAP_PPC_FWNMI: 667 r = hv_enabled; 668 break; 669 #endif 670 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 671 case KVM_CAP_PPC_HTM: 672 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || 673 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); 674 break; 675 #endif 676 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) 677 case KVM_CAP_PPC_SECURE_GUEST: 678 r = hv_enabled && kvmppc_hv_ops->enable_svm && 679 !kvmppc_hv_ops->enable_svm(NULL); 680 break; 681 case KVM_CAP_PPC_DAWR1: 682 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 && 683 !kvmppc_hv_ops->enable_dawr1(NULL)); 684 break; 685 case KVM_CAP_PPC_RPT_INVALIDATE: 686 r = 1; 687 break; 688 #endif 689 case KVM_CAP_PPC_AIL_MODE_3: 690 r = 0; 691 /* 692 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode. 693 * The POWER9s can support it if the guest runs in hash mode, 694 * but QEMU doesn't necessarily query the capability in time. 695 */ 696 if (hv_enabled) { 697 if (kvmhv_on_pseries()) { 698 if (pseries_reloc_on_exception()) 699 r = 1; 700 } else if (cpu_has_feature(CPU_FTR_ARCH_207S) && 701 !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) { 702 r = 1; 703 } 704 } 705 break; 706 default: 707 r = 0; 708 break; 709 } 710 return r; 711 712 } 713 714 long kvm_arch_dev_ioctl(struct file *filp, 715 unsigned int ioctl, unsigned long arg) 716 { 717 return -EINVAL; 718 } 719 720 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 721 { 722 kvmppc_core_free_memslot(kvm, slot); 723 } 724 725 int kvm_arch_prepare_memory_region(struct kvm *kvm, 726 const struct kvm_memory_slot *old, 727 struct kvm_memory_slot *new, 728 enum kvm_mr_change change) 729 { 730 return kvmppc_core_prepare_memory_region(kvm, old, new, change); 731 } 732 733 void kvm_arch_commit_memory_region(struct kvm *kvm, 734 struct kvm_memory_slot *old, 735 const struct kvm_memory_slot *new, 736 enum kvm_mr_change change) 737 { 738 kvmppc_core_commit_memory_region(kvm, old, new, change); 739 } 740 741 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 742 struct kvm_memory_slot *slot) 743 { 744 kvmppc_core_flush_memslot(kvm, slot); 745 } 746 747 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 748 { 749 return 0; 750 } 751 752 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) 753 { 754 struct kvm_vcpu *vcpu; 755 756 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); 757 kvmppc_decrementer_func(vcpu); 758 759 return HRTIMER_NORESTART; 760 } 761 762 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 763 { 764 int err; 765 766 hrtimer_setup(&vcpu->arch.dec_timer, kvmppc_decrementer_wakeup, CLOCK_REALTIME, 767 HRTIMER_MODE_ABS); 768 769 #ifdef CONFIG_KVM_EXIT_TIMING 770 mutex_init(&vcpu->arch.exit_timing_lock); 771 #endif 772 err = kvmppc_subarch_vcpu_init(vcpu); 773 if (err) 774 return err; 775 776 err = kvmppc_core_vcpu_create(vcpu); 777 if (err) 778 goto out_vcpu_uninit; 779 780 rcuwait_init(&vcpu->arch.wait); 781 vcpu->arch.waitp = 
&vcpu->arch.wait; 782 return 0; 783 784 out_vcpu_uninit: 785 kvmppc_subarch_vcpu_uninit(vcpu); 786 return err; 787 } 788 789 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 790 { 791 } 792 793 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 794 { 795 /* Make sure we're not using the vcpu anymore */ 796 hrtimer_cancel(&vcpu->arch.dec_timer); 797 798 switch (vcpu->arch.irq_type) { 799 case KVMPPC_IRQ_MPIC: 800 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); 801 break; 802 case KVMPPC_IRQ_XICS: 803 if (xics_on_xive()) 804 kvmppc_xive_cleanup_vcpu(vcpu); 805 else 806 kvmppc_xics_free_icp(vcpu); 807 break; 808 case KVMPPC_IRQ_XIVE: 809 kvmppc_xive_native_cleanup_vcpu(vcpu); 810 break; 811 } 812 813 kvmppc_core_vcpu_free(vcpu); 814 815 kvmppc_subarch_vcpu_uninit(vcpu); 816 } 817 818 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 819 { 820 return kvmppc_core_pending_dec(vcpu); 821 } 822 823 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 824 { 825 #ifdef CONFIG_BOOKE 826 /* 827 * vrsave (formerly usprg0) isn't used by Linux, but may 828 * be used by the guest. 829 * 830 * On non-booke this is associated with Altivec and 831 * is handled by code in book3s.c. 832 */ 833 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); 834 #endif 835 kvmppc_core_vcpu_load(vcpu, cpu); 836 } 837 838 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 839 { 840 kvmppc_core_vcpu_put(vcpu); 841 #ifdef CONFIG_BOOKE 842 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); 843 #endif 844 } 845 846 /* 847 * irq_bypass_add_producer and irq_bypass_del_producer are only 848 * useful if the architecture supports PCI passthrough. 849 * irq_bypass_stop and irq_bypass_start are not needed and so 850 * kvm_ops are not defined for them. 851 */ 852 bool kvm_arch_has_irq_bypass(void) 853 { 854 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) || 855 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer)); 856 } 857 858 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 859 struct irq_bypass_producer *prod) 860 { 861 struct kvm_kernel_irqfd *irqfd = 862 container_of(cons, struct kvm_kernel_irqfd, consumer); 863 struct kvm *kvm = irqfd->kvm; 864 865 if (kvm->arch.kvm_ops->irq_bypass_add_producer) 866 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); 867 868 return 0; 869 } 870 871 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 872 struct irq_bypass_producer *prod) 873 { 874 struct kvm_kernel_irqfd *irqfd = 875 container_of(cons, struct kvm_kernel_irqfd, consumer); 876 struct kvm *kvm = irqfd->kvm; 877 878 if (kvm->arch.kvm_ops->irq_bypass_del_producer) 879 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); 880 } 881 882 #ifdef CONFIG_VSX 883 static inline int kvmppc_get_vsr_dword_offset(int index) 884 { 885 int offset; 886 887 if ((index != 0) && (index != 1)) 888 return -1; 889 890 #ifdef __BIG_ENDIAN 891 offset = index; 892 #else 893 offset = 1 - index; 894 #endif 895 896 return offset; 897 } 898 899 static inline int kvmppc_get_vsr_word_offset(int index) 900 { 901 int offset; 902 903 if ((index > 3) || (index < 0)) 904 return -1; 905 906 #ifdef __BIG_ENDIAN 907 offset = index; 908 #else 909 offset = 3 - index; 910 #endif 911 return offset; 912 } 913 914 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, 915 u64 gpr) 916 { 917 union kvmppc_one_reg val; 918 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); 919 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 920 921 if (offset == -1) 922 return; 923 924 if (index >= 32) { 925 
kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); 926 val.vsxval[offset] = gpr; 927 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); 928 } else { 929 kvmppc_set_vsx_fpr(vcpu, index, offset, gpr); 930 } 931 } 932 933 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, 934 u64 gpr) 935 { 936 union kvmppc_one_reg val; 937 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 938 939 if (index >= 32) { 940 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); 941 val.vsxval[0] = gpr; 942 val.vsxval[1] = gpr; 943 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); 944 } else { 945 kvmppc_set_vsx_fpr(vcpu, index, 0, gpr); 946 kvmppc_set_vsx_fpr(vcpu, index, 1, gpr); 947 } 948 } 949 950 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, 951 u32 gpr) 952 { 953 union kvmppc_one_reg val; 954 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 955 956 if (index >= 32) { 957 val.vsx32val[0] = gpr; 958 val.vsx32val[1] = gpr; 959 val.vsx32val[2] = gpr; 960 val.vsx32val[3] = gpr; 961 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); 962 } else { 963 val.vsx32val[0] = gpr; 964 val.vsx32val[1] = gpr; 965 kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]); 966 kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]); 967 } 968 } 969 970 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, 971 u32 gpr32) 972 { 973 union kvmppc_one_reg val; 974 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); 975 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 976 int dword_offset, word_offset; 977 978 if (offset == -1) 979 return; 980 981 if (index >= 32) { 982 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); 983 val.vsx32val[offset] = gpr32; 984 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); 985 } else { 986 dword_offset = offset / 2; 987 word_offset = offset % 2; 988 val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset); 989 val.vsx32val[word_offset] = gpr32; 990 kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]); 991 } 992 } 993 #endif /* CONFIG_VSX */ 994 995 #ifdef CONFIG_ALTIVEC 996 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, 997 int index, int element_size) 998 { 999 int offset; 1000 int elts = sizeof(vector128)/element_size; 1001 1002 if ((index < 0) || (index >= elts)) 1003 return -1; 1004 1005 if (kvmppc_need_byteswap(vcpu)) 1006 offset = elts - index - 1; 1007 else 1008 offset = index; 1009 1010 return offset; 1011 } 1012 1013 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, 1014 int index) 1015 { 1016 return kvmppc_get_vmx_offset_generic(vcpu, index, 8); 1017 } 1018 1019 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, 1020 int index) 1021 { 1022 return kvmppc_get_vmx_offset_generic(vcpu, index, 4); 1023 } 1024 1025 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, 1026 int index) 1027 { 1028 return kvmppc_get_vmx_offset_generic(vcpu, index, 2); 1029 } 1030 1031 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, 1032 int index) 1033 { 1034 return kvmppc_get_vmx_offset_generic(vcpu, index, 1); 1035 } 1036 1037 1038 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, 1039 u64 gpr) 1040 { 1041 union kvmppc_one_reg val; 1042 int offset = kvmppc_get_vmx_dword_offset(vcpu, 1043 vcpu->arch.mmio_vmx_offset); 1044 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1045 1046 if (offset == -1) 1047 return; 1048 1049 kvmppc_get_vsx_vr(vcpu, index, &val.vval); 1050 val.vsxval[offset] = gpr; 1051 kvmppc_set_vsx_vr(vcpu, index, &val.vval); 1052 } 1053 1054 static 
inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, 1055 u32 gpr32) 1056 { 1057 union kvmppc_one_reg val; 1058 int offset = kvmppc_get_vmx_word_offset(vcpu, 1059 vcpu->arch.mmio_vmx_offset); 1060 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1061 1062 if (offset == -1) 1063 return; 1064 1065 kvmppc_get_vsx_vr(vcpu, index, &val.vval); 1066 val.vsx32val[offset] = gpr32; 1067 kvmppc_set_vsx_vr(vcpu, index, &val.vval); 1068 } 1069 1070 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, 1071 u16 gpr16) 1072 { 1073 union kvmppc_one_reg val; 1074 int offset = kvmppc_get_vmx_hword_offset(vcpu, 1075 vcpu->arch.mmio_vmx_offset); 1076 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1077 1078 if (offset == -1) 1079 return; 1080 1081 kvmppc_get_vsx_vr(vcpu, index, &val.vval); 1082 val.vsx16val[offset] = gpr16; 1083 kvmppc_set_vsx_vr(vcpu, index, &val.vval); 1084 } 1085 1086 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, 1087 u8 gpr8) 1088 { 1089 union kvmppc_one_reg val; 1090 int offset = kvmppc_get_vmx_byte_offset(vcpu, 1091 vcpu->arch.mmio_vmx_offset); 1092 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 1093 1094 if (offset == -1) 1095 return; 1096 1097 kvmppc_get_vsx_vr(vcpu, index, &val.vval); 1098 val.vsx8val[offset] = gpr8; 1099 kvmppc_set_vsx_vr(vcpu, index, &val.vval); 1100 } 1101 #endif /* CONFIG_ALTIVEC */ 1102 1103 #ifdef CONFIG_PPC_FPU 1104 static inline u64 sp_to_dp(u32 fprs) 1105 { 1106 u64 fprd; 1107 1108 preempt_disable(); 1109 enable_kernel_fp(); 1110 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs) 1111 : "fr0"); 1112 preempt_enable(); 1113 return fprd; 1114 } 1115 1116 static inline u32 dp_to_sp(u64 fprd) 1117 { 1118 u32 fprs; 1119 1120 preempt_disable(); 1121 enable_kernel_fp(); 1122 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd) 1123 : "fr0"); 1124 preempt_enable(); 1125 return fprs; 1126 } 1127 1128 #else 1129 #define sp_to_dp(x) (x) 1130 #define dp_to_sp(x) (x) 1131 #endif /* CONFIG_PPC_FPU */ 1132 1133 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) 1134 { 1135 struct kvm_run *run = vcpu->run; 1136 u64 gpr; 1137 1138 if (run->mmio.len > sizeof(gpr)) 1139 return; 1140 1141 if (!vcpu->arch.mmio_host_swabbed) { 1142 switch (run->mmio.len) { 1143 case 8: gpr = *(u64 *)run->mmio.data; break; 1144 case 4: gpr = *(u32 *)run->mmio.data; break; 1145 case 2: gpr = *(u16 *)run->mmio.data; break; 1146 case 1: gpr = *(u8 *)run->mmio.data; break; 1147 } 1148 } else { 1149 switch (run->mmio.len) { 1150 case 8: gpr = swab64(*(u64 *)run->mmio.data); break; 1151 case 4: gpr = swab32(*(u32 *)run->mmio.data); break; 1152 case 2: gpr = swab16(*(u16 *)run->mmio.data); break; 1153 case 1: gpr = *(u8 *)run->mmio.data; break; 1154 } 1155 } 1156 1157 /* conversion between single and double precision */ 1158 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) 1159 gpr = sp_to_dp(gpr); 1160 1161 if (vcpu->arch.mmio_sign_extend) { 1162 switch (run->mmio.len) { 1163 #ifdef CONFIG_PPC64 1164 case 4: 1165 gpr = (s64)(s32)gpr; 1166 break; 1167 #endif 1168 case 2: 1169 gpr = (s64)(s16)gpr; 1170 break; 1171 case 1: 1172 gpr = (s64)(s8)gpr; 1173 break; 1174 } 1175 } 1176 1177 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { 1178 case KVM_MMIO_REG_GPR: 1179 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 1180 break; 1181 case KVM_MMIO_REG_FPR: 1182 if (vcpu->kvm->arch.kvm_ops->giveup_ext) 1183 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); 1184 1185 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & 
KVM_MMIO_REG_MASK, gpr); 1186 break; 1187 #ifdef CONFIG_PPC_BOOK3S 1188 case KVM_MMIO_REG_QPR: 1189 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 1190 break; 1191 case KVM_MMIO_REG_FQPR: 1192 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); 1193 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 1194 break; 1195 #endif 1196 #ifdef CONFIG_VSX 1197 case KVM_MMIO_REG_VSX: 1198 if (vcpu->kvm->arch.kvm_ops->giveup_ext) 1199 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); 1200 1201 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) 1202 kvmppc_set_vsr_dword(vcpu, gpr); 1203 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) 1204 kvmppc_set_vsr_word(vcpu, gpr); 1205 else if (vcpu->arch.mmio_copy_type == 1206 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) 1207 kvmppc_set_vsr_dword_dump(vcpu, gpr); 1208 else if (vcpu->arch.mmio_copy_type == 1209 KVMPPC_VSX_COPY_WORD_LOAD_DUMP) 1210 kvmppc_set_vsr_word_dump(vcpu, gpr); 1211 break; 1212 #endif 1213 #ifdef CONFIG_ALTIVEC 1214 case KVM_MMIO_REG_VMX: 1215 if (vcpu->kvm->arch.kvm_ops->giveup_ext) 1216 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); 1217 1218 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) 1219 kvmppc_set_vmx_dword(vcpu, gpr); 1220 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) 1221 kvmppc_set_vmx_word(vcpu, gpr); 1222 else if (vcpu->arch.mmio_copy_type == 1223 KVMPPC_VMX_COPY_HWORD) 1224 kvmppc_set_vmx_hword(vcpu, gpr); 1225 else if (vcpu->arch.mmio_copy_type == 1226 KVMPPC_VMX_COPY_BYTE) 1227 kvmppc_set_vmx_byte(vcpu, gpr); 1228 break; 1229 #endif 1230 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1231 case KVM_MMIO_REG_NESTED_GPR: 1232 if (kvmppc_need_byteswap(vcpu)) 1233 gpr = swab64(gpr); 1234 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, 1235 sizeof(gpr)); 1236 break; 1237 #endif 1238 default: 1239 BUG(); 1240 } 1241 } 1242 1243 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, 1244 unsigned int rt, unsigned int bytes, 1245 int is_default_endian, int sign_extend) 1246 { 1247 struct kvm_run *run = vcpu->run; 1248 int idx, ret; 1249 bool host_swabbed; 1250 1251 /* Pity C doesn't have a logical XOR operator */ 1252 if (kvmppc_need_byteswap(vcpu)) { 1253 host_swabbed = is_default_endian; 1254 } else { 1255 host_swabbed = !is_default_endian; 1256 } 1257 1258 if (bytes > sizeof(run->mmio.data)) 1259 return EMULATE_FAIL; 1260 1261 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 1262 run->mmio.len = bytes; 1263 run->mmio.is_write = 0; 1264 1265 vcpu->arch.io_gpr = rt; 1266 vcpu->arch.mmio_host_swabbed = host_swabbed; 1267 vcpu->mmio_needed = 1; 1268 vcpu->mmio_is_write = 0; 1269 vcpu->arch.mmio_sign_extend = sign_extend; 1270 1271 idx = srcu_read_lock(&vcpu->kvm->srcu); 1272 1273 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, 1274 bytes, &run->mmio.data); 1275 1276 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1277 1278 if (!ret) { 1279 kvmppc_complete_mmio_load(vcpu); 1280 vcpu->mmio_needed = 0; 1281 return EMULATE_DONE; 1282 } 1283 1284 return EMULATE_DO_MMIO; 1285 } 1286 1287 int kvmppc_handle_load(struct kvm_vcpu *vcpu, 1288 unsigned int rt, unsigned int bytes, 1289 int is_default_endian) 1290 { 1291 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); 1292 } 1293 EXPORT_SYMBOL_GPL(kvmppc_handle_load); 1294 1295 /* Same as above, but sign extends */ 1296 int kvmppc_handle_loads(struct kvm_vcpu *vcpu, 1297 unsigned int rt, unsigned int bytes, 1298 int is_default_endian) 1299 { 1300 return __kvmppc_handle_load(vcpu, rt, bytes, 
is_default_endian, 1); 1301 } 1302 1303 #ifdef CONFIG_VSX 1304 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, 1305 unsigned int rt, unsigned int bytes, 1306 int is_default_endian, int mmio_sign_extend) 1307 { 1308 enum emulation_result emulated = EMULATE_DONE; 1309 1310 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ 1311 if (vcpu->arch.mmio_vsx_copy_nums > 4) 1312 return EMULATE_FAIL; 1313 1314 while (vcpu->arch.mmio_vsx_copy_nums) { 1315 emulated = __kvmppc_handle_load(vcpu, rt, bytes, 1316 is_default_endian, mmio_sign_extend); 1317 1318 if (emulated != EMULATE_DONE) 1319 break; 1320 1321 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1322 1323 vcpu->arch.mmio_vsx_copy_nums--; 1324 vcpu->arch.mmio_vsx_offset++; 1325 } 1326 return emulated; 1327 } 1328 #endif /* CONFIG_VSX */ 1329 1330 int kvmppc_handle_store(struct kvm_vcpu *vcpu, 1331 u64 val, unsigned int bytes, int is_default_endian) 1332 { 1333 struct kvm_run *run = vcpu->run; 1334 void *data = run->mmio.data; 1335 int idx, ret; 1336 bool host_swabbed; 1337 1338 /* Pity C doesn't have a logical XOR operator */ 1339 if (kvmppc_need_byteswap(vcpu)) { 1340 host_swabbed = is_default_endian; 1341 } else { 1342 host_swabbed = !is_default_endian; 1343 } 1344 1345 if (bytes > sizeof(run->mmio.data)) 1346 return EMULATE_FAIL; 1347 1348 run->mmio.phys_addr = vcpu->arch.paddr_accessed; 1349 run->mmio.len = bytes; 1350 run->mmio.is_write = 1; 1351 vcpu->mmio_needed = 1; 1352 vcpu->mmio_is_write = 1; 1353 1354 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) 1355 val = dp_to_sp(val); 1356 1357 /* Store the value at the lowest bytes in 'data'. */ 1358 if (!host_swabbed) { 1359 switch (bytes) { 1360 case 8: *(u64 *)data = val; break; 1361 case 4: *(u32 *)data = val; break; 1362 case 2: *(u16 *)data = val; break; 1363 case 1: *(u8 *)data = val; break; 1364 } 1365 } else { 1366 switch (bytes) { 1367 case 8: *(u64 *)data = swab64(val); break; 1368 case 4: *(u32 *)data = swab32(val); break; 1369 case 2: *(u16 *)data = swab16(val); break; 1370 case 1: *(u8 *)data = val; break; 1371 } 1372 } 1373 1374 idx = srcu_read_lock(&vcpu->kvm->srcu); 1375 1376 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, 1377 bytes, &run->mmio.data); 1378 1379 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1380 1381 if (!ret) { 1382 vcpu->mmio_needed = 0; 1383 return EMULATE_DONE; 1384 } 1385 1386 return EMULATE_DO_MMIO; 1387 } 1388 EXPORT_SYMBOL_GPL(kvmppc_handle_store); 1389 1390 #ifdef CONFIG_VSX 1391 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) 1392 { 1393 u32 dword_offset, word_offset; 1394 union kvmppc_one_reg reg; 1395 int vsx_offset = 0; 1396 int copy_type = vcpu->arch.mmio_copy_type; 1397 int result = 0; 1398 1399 switch (copy_type) { 1400 case KVMPPC_VSX_COPY_DWORD: 1401 vsx_offset = 1402 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); 1403 1404 if (vsx_offset == -1) { 1405 result = -1; 1406 break; 1407 } 1408 1409 if (rs < 32) { 1410 *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset); 1411 } else { 1412 kvmppc_get_vsx_vr(vcpu, rs - 32, ®.vval); 1413 *val = reg.vsxval[vsx_offset]; 1414 } 1415 break; 1416 1417 case KVMPPC_VSX_COPY_WORD: 1418 vsx_offset = 1419 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); 1420 1421 if (vsx_offset == -1) { 1422 result = -1; 1423 break; 1424 } 1425 1426 if (rs < 32) { 1427 dword_offset = vsx_offset / 2; 1428 word_offset = vsx_offset % 2; 1429 reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset); 1430 *val = reg.vsx32val[word_offset]; 1431 } else { 
1432 kvmppc_get_vsx_vr(vcpu, rs - 32, ®.vval); 1433 *val = reg.vsx32val[vsx_offset]; 1434 } 1435 break; 1436 1437 default: 1438 result = -1; 1439 break; 1440 } 1441 1442 return result; 1443 } 1444 1445 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, 1446 int rs, unsigned int bytes, int is_default_endian) 1447 { 1448 u64 val; 1449 enum emulation_result emulated = EMULATE_DONE; 1450 1451 vcpu->arch.io_gpr = rs; 1452 1453 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ 1454 if (vcpu->arch.mmio_vsx_copy_nums > 4) 1455 return EMULATE_FAIL; 1456 1457 while (vcpu->arch.mmio_vsx_copy_nums) { 1458 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) 1459 return EMULATE_FAIL; 1460 1461 emulated = kvmppc_handle_store(vcpu, 1462 val, bytes, is_default_endian); 1463 1464 if (emulated != EMULATE_DONE) 1465 break; 1466 1467 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1468 1469 vcpu->arch.mmio_vsx_copy_nums--; 1470 vcpu->arch.mmio_vsx_offset++; 1471 } 1472 1473 return emulated; 1474 } 1475 1476 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) 1477 { 1478 struct kvm_run *run = vcpu->run; 1479 enum emulation_result emulated = EMULATE_FAIL; 1480 int r; 1481 1482 vcpu->arch.paddr_accessed += run->mmio.len; 1483 1484 if (!vcpu->mmio_is_write) { 1485 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, 1486 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); 1487 } else { 1488 emulated = kvmppc_handle_vsx_store(vcpu, 1489 vcpu->arch.io_gpr, run->mmio.len, 1); 1490 } 1491 1492 switch (emulated) { 1493 case EMULATE_DO_MMIO: 1494 run->exit_reason = KVM_EXIT_MMIO; 1495 r = RESUME_HOST; 1496 break; 1497 case EMULATE_FAIL: 1498 pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); 1499 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1500 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 1501 r = RESUME_HOST; 1502 break; 1503 default: 1504 r = RESUME_GUEST; 1505 break; 1506 } 1507 return r; 1508 } 1509 #endif /* CONFIG_VSX */ 1510 1511 #ifdef CONFIG_ALTIVEC 1512 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, 1513 unsigned int rt, unsigned int bytes, int is_default_endian) 1514 { 1515 enum emulation_result emulated = EMULATE_DONE; 1516 1517 if (vcpu->arch.mmio_vmx_copy_nums > 2) 1518 return EMULATE_FAIL; 1519 1520 while (vcpu->arch.mmio_vmx_copy_nums) { 1521 emulated = __kvmppc_handle_load(vcpu, rt, bytes, 1522 is_default_endian, 0); 1523 1524 if (emulated != EMULATE_DONE) 1525 break; 1526 1527 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1528 vcpu->arch.mmio_vmx_copy_nums--; 1529 vcpu->arch.mmio_vmx_offset++; 1530 } 1531 1532 return emulated; 1533 } 1534 1535 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) 1536 { 1537 union kvmppc_one_reg reg; 1538 int vmx_offset = 0; 1539 int result = 0; 1540 1541 vmx_offset = 1542 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1543 1544 if (vmx_offset == -1) 1545 return -1; 1546 1547 kvmppc_get_vsx_vr(vcpu, index, ®.vval); 1548 *val = reg.vsxval[vmx_offset]; 1549 1550 return result; 1551 } 1552 1553 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) 1554 { 1555 union kvmppc_one_reg reg; 1556 int vmx_offset = 0; 1557 int result = 0; 1558 1559 vmx_offset = 1560 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1561 1562 if (vmx_offset == -1) 1563 return -1; 1564 1565 kvmppc_get_vsx_vr(vcpu, index, ®.vval); 1566 *val = reg.vsx32val[vmx_offset]; 1567 1568 return result; 1569 } 1570 1571 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 
*val) 1572 { 1573 union kvmppc_one_reg reg; 1574 int vmx_offset = 0; 1575 int result = 0; 1576 1577 vmx_offset = 1578 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1579 1580 if (vmx_offset == -1) 1581 return -1; 1582 1583 kvmppc_get_vsx_vr(vcpu, index, ®.vval); 1584 *val = reg.vsx16val[vmx_offset]; 1585 1586 return result; 1587 } 1588 1589 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) 1590 { 1591 union kvmppc_one_reg reg; 1592 int vmx_offset = 0; 1593 int result = 0; 1594 1595 vmx_offset = 1596 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); 1597 1598 if (vmx_offset == -1) 1599 return -1; 1600 1601 kvmppc_get_vsx_vr(vcpu, index, ®.vval); 1602 *val = reg.vsx8val[vmx_offset]; 1603 1604 return result; 1605 } 1606 1607 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, 1608 unsigned int rs, unsigned int bytes, int is_default_endian) 1609 { 1610 u64 val = 0; 1611 unsigned int index = rs & KVM_MMIO_REG_MASK; 1612 enum emulation_result emulated = EMULATE_DONE; 1613 1614 if (vcpu->arch.mmio_vmx_copy_nums > 2) 1615 return EMULATE_FAIL; 1616 1617 vcpu->arch.io_gpr = rs; 1618 1619 while (vcpu->arch.mmio_vmx_copy_nums) { 1620 switch (vcpu->arch.mmio_copy_type) { 1621 case KVMPPC_VMX_COPY_DWORD: 1622 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) 1623 return EMULATE_FAIL; 1624 1625 break; 1626 case KVMPPC_VMX_COPY_WORD: 1627 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) 1628 return EMULATE_FAIL; 1629 break; 1630 case KVMPPC_VMX_COPY_HWORD: 1631 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) 1632 return EMULATE_FAIL; 1633 break; 1634 case KVMPPC_VMX_COPY_BYTE: 1635 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) 1636 return EMULATE_FAIL; 1637 break; 1638 default: 1639 return EMULATE_FAIL; 1640 } 1641 1642 emulated = kvmppc_handle_store(vcpu, val, bytes, 1643 is_default_endian); 1644 if (emulated != EMULATE_DONE) 1645 break; 1646 1647 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1648 vcpu->arch.mmio_vmx_copy_nums--; 1649 vcpu->arch.mmio_vmx_offset++; 1650 } 1651 1652 return emulated; 1653 } 1654 1655 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) 1656 { 1657 struct kvm_run *run = vcpu->run; 1658 enum emulation_result emulated = EMULATE_FAIL; 1659 int r; 1660 1661 vcpu->arch.paddr_accessed += run->mmio.len; 1662 1663 if (!vcpu->mmio_is_write) { 1664 emulated = kvmppc_handle_vmx_load(vcpu, 1665 vcpu->arch.io_gpr, run->mmio.len, 1); 1666 } else { 1667 emulated = kvmppc_handle_vmx_store(vcpu, 1668 vcpu->arch.io_gpr, run->mmio.len, 1); 1669 } 1670 1671 switch (emulated) { 1672 case EMULATE_DO_MMIO: 1673 run->exit_reason = KVM_EXIT_MMIO; 1674 r = RESUME_HOST; 1675 break; 1676 case EMULATE_FAIL: 1677 pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); 1678 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1679 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 1680 r = RESUME_HOST; 1681 break; 1682 default: 1683 r = RESUME_GUEST; 1684 break; 1685 } 1686 return r; 1687 } 1688 #endif /* CONFIG_ALTIVEC */ 1689 1690 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1691 { 1692 int r = 0; 1693 union kvmppc_one_reg val; 1694 int size; 1695 1696 size = one_reg_size(reg->id); 1697 if (size > sizeof(val)) 1698 return -EINVAL; 1699 1700 r = kvmppc_get_one_reg(vcpu, reg->id, &val); 1701 if (r == -EINVAL) { 1702 r = 0; 1703 switch (reg->id) { 1704 #ifdef CONFIG_ALTIVEC 1705 case KVM_REG_PPC_VR0 ... 
KVM_REG_PPC_VR31: 1706 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1707 r = -ENXIO; 1708 break; 1709 } 1710 kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); 1711 break; 1712 case KVM_REG_PPC_VSCR: 1713 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1714 r = -ENXIO; 1715 break; 1716 } 1717 val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu)); 1718 break; 1719 case KVM_REG_PPC_VRSAVE: 1720 val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu)); 1721 break; 1722 #endif /* CONFIG_ALTIVEC */ 1723 default: 1724 r = -EINVAL; 1725 break; 1726 } 1727 } 1728 1729 if (r) 1730 return r; 1731 1732 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) 1733 r = -EFAULT; 1734 1735 return r; 1736 } 1737 1738 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1739 { 1740 int r; 1741 union kvmppc_one_reg val; 1742 int size; 1743 1744 size = one_reg_size(reg->id); 1745 if (size > sizeof(val)) 1746 return -EINVAL; 1747 1748 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) 1749 return -EFAULT; 1750 1751 r = kvmppc_set_one_reg(vcpu, reg->id, &val); 1752 if (r == -EINVAL) { 1753 r = 0; 1754 switch (reg->id) { 1755 #ifdef CONFIG_ALTIVEC 1756 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: 1757 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1758 r = -ENXIO; 1759 break; 1760 } 1761 kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); 1762 break; 1763 case KVM_REG_PPC_VSCR: 1764 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1765 r = -ENXIO; 1766 break; 1767 } 1768 kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val)); 1769 break; 1770 case KVM_REG_PPC_VRSAVE: 1771 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { 1772 r = -ENXIO; 1773 break; 1774 } 1775 kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val)); 1776 break; 1777 #endif /* CONFIG_ALTIVEC */ 1778 default: 1779 r = -EINVAL; 1780 break; 1781 } 1782 } 1783 1784 return r; 1785 } 1786 1787 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 1788 { 1789 struct kvm_run *run = vcpu->run; 1790 int r; 1791 1792 vcpu_load(vcpu); 1793 1794 if (vcpu->mmio_needed) { 1795 vcpu->mmio_needed = 0; 1796 if (!vcpu->mmio_is_write) 1797 kvmppc_complete_mmio_load(vcpu); 1798 #ifdef CONFIG_VSX 1799 if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1800 vcpu->arch.mmio_vsx_copy_nums--; 1801 vcpu->arch.mmio_vsx_offset++; 1802 } 1803 1804 if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1805 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); 1806 if (r == RESUME_HOST) { 1807 vcpu->mmio_needed = 1; 1808 goto out; 1809 } 1810 } 1811 #endif 1812 #ifdef CONFIG_ALTIVEC 1813 if (vcpu->arch.mmio_vmx_copy_nums > 0) { 1814 vcpu->arch.mmio_vmx_copy_nums--; 1815 vcpu->arch.mmio_vmx_offset++; 1816 } 1817 1818 if (vcpu->arch.mmio_vmx_copy_nums > 0) { 1819 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); 1820 if (r == RESUME_HOST) { 1821 vcpu->mmio_needed = 1; 1822 goto out; 1823 } 1824 } 1825 #endif 1826 } else if (vcpu->arch.osi_needed) { 1827 u64 *gprs = run->osi.gprs; 1828 int i; 1829 1830 for (i = 0; i < 32; i++) 1831 kvmppc_set_gpr(vcpu, i, gprs[i]); 1832 vcpu->arch.osi_needed = 0; 1833 } else if (vcpu->arch.hcall_needed) { 1834 int i; 1835 1836 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); 1837 for (i = 0; i < 9; ++i) 1838 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); 1839 vcpu->arch.hcall_needed = 0; 1840 #ifdef CONFIG_BOOKE 1841 } else if (vcpu->arch.epr_needed) { 1842 kvmppc_set_epr(vcpu, run->epr.epr); 1843 vcpu->arch.epr_needed = 0; 1844 #endif 1845 } 1846 1847 kvm_sigset_activate(vcpu); 1848 1849 if (!vcpu->wants_to_run) 1850 r = -EINTR; 1851 else 1852 r = 
kvmppc_vcpu_run(vcpu); 1853 1854 kvm_sigset_deactivate(vcpu); 1855 1856 #ifdef CONFIG_ALTIVEC 1857 out: 1858 #endif 1859 1860 /* 1861 * We're already returning to userspace, don't pass the 1862 * RESUME_HOST flags along. 1863 */ 1864 if (r > 0) 1865 r = 0; 1866 1867 vcpu_put(vcpu); 1868 return r; 1869 } 1870 1871 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 1872 { 1873 if (irq->irq == KVM_INTERRUPT_UNSET) { 1874 kvmppc_core_dequeue_external(vcpu); 1875 return 0; 1876 } 1877 1878 kvmppc_core_queue_external(vcpu, irq); 1879 1880 kvm_vcpu_kick(vcpu); 1881 1882 return 0; 1883 } 1884 1885 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1886 struct kvm_enable_cap *cap) 1887 { 1888 int r; 1889 1890 if (cap->flags) 1891 return -EINVAL; 1892 1893 switch (cap->cap) { 1894 case KVM_CAP_PPC_OSI: 1895 r = 0; 1896 vcpu->arch.osi_enabled = true; 1897 break; 1898 case KVM_CAP_PPC_PAPR: 1899 r = 0; 1900 vcpu->arch.papr_enabled = true; 1901 break; 1902 case KVM_CAP_PPC_EPR: 1903 r = 0; 1904 if (cap->args[0]) 1905 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; 1906 else 1907 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; 1908 break; 1909 #ifdef CONFIG_BOOKE 1910 case KVM_CAP_PPC_BOOKE_WATCHDOG: 1911 r = 0; 1912 vcpu->arch.watchdog_enabled = true; 1913 break; 1914 #endif 1915 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 1916 case KVM_CAP_SW_TLB: { 1917 struct kvm_config_tlb cfg; 1918 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; 1919 1920 r = -EFAULT; 1921 if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) 1922 break; 1923 1924 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); 1925 break; 1926 } 1927 #endif 1928 #ifdef CONFIG_KVM_MPIC 1929 case KVM_CAP_IRQ_MPIC: { 1930 CLASS(fd, f)(cap->args[0]); 1931 struct kvm_device *dev; 1932 1933 r = -EBADF; 1934 if (fd_empty(f)) 1935 break; 1936 1937 r = -EPERM; 1938 dev = kvm_device_from_filp(fd_file(f)); 1939 if (dev) 1940 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); 1941 1942 break; 1943 } 1944 #endif 1945 #ifdef CONFIG_KVM_XICS 1946 case KVM_CAP_IRQ_XICS: { 1947 CLASS(fd, f)(cap->args[0]); 1948 struct kvm_device *dev; 1949 1950 r = -EBADF; 1951 if (fd_empty(f)) 1952 break; 1953 1954 r = -EPERM; 1955 dev = kvm_device_from_filp(fd_file(f)); 1956 if (dev) { 1957 if (xics_on_xive()) 1958 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); 1959 else 1960 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); 1961 } 1962 break; 1963 } 1964 #endif /* CONFIG_KVM_XICS */ 1965 #ifdef CONFIG_KVM_XIVE 1966 case KVM_CAP_PPC_IRQ_XIVE: { 1967 CLASS(fd, f)(cap->args[0]); 1968 struct kvm_device *dev; 1969 1970 r = -EBADF; 1971 if (fd_empty(f)) 1972 break; 1973 1974 r = -ENXIO; 1975 if (!xive_enabled()) 1976 break; 1977 1978 r = -EPERM; 1979 dev = kvm_device_from_filp(fd_file(f)); 1980 if (dev) 1981 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, 1982 cap->args[1]); 1983 break; 1984 } 1985 #endif /* CONFIG_KVM_XIVE */ 1986 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1987 case KVM_CAP_PPC_FWNMI: 1988 r = -EINVAL; 1989 if (!is_kvmppc_hv_enabled(vcpu->kvm)) 1990 break; 1991 r = 0; 1992 vcpu->kvm->arch.fwnmi_enabled = true; 1993 break; 1994 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ 1995 default: 1996 r = -EINVAL; 1997 break; 1998 } 1999 2000 if (!r) 2001 r = kvmppc_sanity_check(vcpu); 2002 2003 return r; 2004 } 2005 2006 bool kvm_arch_intc_initialized(struct kvm *kvm) 2007 { 2008 #ifdef CONFIG_KVM_MPIC 2009 if (kvm->arch.mpic) 2010 return true; 2011 #endif 2012 #ifdef CONFIG_KVM_XICS 2013 if (kvm->arch.xics || 
kvm->arch.xive) 2014 return true; 2015 #endif 2016 return false; 2017 } 2018 2019 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 2020 struct kvm_mp_state *mp_state) 2021 { 2022 return -EINVAL; 2023 } 2024 2025 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 2026 struct kvm_mp_state *mp_state) 2027 { 2028 return -EINVAL; 2029 } 2030 2031 long kvm_arch_vcpu_async_ioctl(struct file *filp, 2032 unsigned int ioctl, unsigned long arg) 2033 { 2034 struct kvm_vcpu *vcpu = filp->private_data; 2035 void __user *argp = (void __user *)arg; 2036 2037 if (ioctl == KVM_INTERRUPT) { 2038 struct kvm_interrupt irq; 2039 if (copy_from_user(&irq, argp, sizeof(irq))) 2040 return -EFAULT; 2041 return kvm_vcpu_ioctl_interrupt(vcpu, &irq); 2042 } 2043 return -ENOIOCTLCMD; 2044 } 2045 2046 long kvm_arch_vcpu_ioctl(struct file *filp, 2047 unsigned int ioctl, unsigned long arg) 2048 { 2049 struct kvm_vcpu *vcpu = filp->private_data; 2050 void __user *argp = (void __user *)arg; 2051 long r; 2052 2053 switch (ioctl) { 2054 case KVM_ENABLE_CAP: 2055 { 2056 struct kvm_enable_cap cap; 2057 r = -EFAULT; 2058 if (copy_from_user(&cap, argp, sizeof(cap))) 2059 goto out; 2060 vcpu_load(vcpu); 2061 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2062 vcpu_put(vcpu); 2063 break; 2064 } 2065 2066 case KVM_SET_ONE_REG: 2067 case KVM_GET_ONE_REG: 2068 { 2069 struct kvm_one_reg reg; 2070 r = -EFAULT; 2071 if (copy_from_user(®, argp, sizeof(reg))) 2072 goto out; 2073 if (ioctl == KVM_SET_ONE_REG) 2074 r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); 2075 else 2076 r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); 2077 break; 2078 } 2079 2080 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) 2081 case KVM_DIRTY_TLB: { 2082 struct kvm_dirty_tlb dirty; 2083 r = -EFAULT; 2084 if (copy_from_user(&dirty, argp, sizeof(dirty))) 2085 goto out; 2086 vcpu_load(vcpu); 2087 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); 2088 vcpu_put(vcpu); 2089 break; 2090 } 2091 #endif 2092 default: 2093 r = -EINVAL; 2094 } 2095 2096 out: 2097 return r; 2098 } 2099 2100 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 2101 { 2102 return VM_FAULT_SIGBUS; 2103 } 2104 2105 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) 2106 { 2107 u32 inst_nop = 0x60000000; 2108 #ifdef CONFIG_KVM_BOOKE_HV 2109 u32 inst_sc1 = 0x44000022; 2110 pvinfo->hcall[0] = cpu_to_be32(inst_sc1); 2111 pvinfo->hcall[1] = cpu_to_be32(inst_nop); 2112 pvinfo->hcall[2] = cpu_to_be32(inst_nop); 2113 pvinfo->hcall[3] = cpu_to_be32(inst_nop); 2114 #else 2115 u32 inst_lis = 0x3c000000; 2116 u32 inst_ori = 0x60000000; 2117 u32 inst_sc = 0x44000002; 2118 u32 inst_imm_mask = 0xffff; 2119 2120 /* 2121 * The hypercall to get into KVM from within guest context is as 2122 * follows: 2123 * 2124 * lis r0, r0, KVM_SC_MAGIC_R0@h 2125 * ori r0, KVM_SC_MAGIC_R0@l 2126 * sc 2127 * nop 2128 */ 2129 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); 2130 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); 2131 pvinfo->hcall[2] = cpu_to_be32(inst_sc); 2132 pvinfo->hcall[3] = cpu_to_be32(inst_nop); 2133 #endif 2134 2135 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; 2136 2137 return 0; 2138 } 2139 2140 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) 2141 { 2142 int ret = 0; 2143 2144 #ifdef CONFIG_KVM_MPIC 2145 ret = ret || (kvm->arch.mpic != NULL); 2146 #endif 2147 #ifdef CONFIG_KVM_XICS 2148 ret = ret || (kvm->arch.xics != NULL); 2149 ret = ret || (kvm->arch.xive != NULL); 2150 #endif 2151 smp_rmb(); 
2152 return ret; 2153 } 2154 2155 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 2156 bool line_status) 2157 { 2158 if (!kvm_arch_irqchip_in_kernel(kvm)) 2159 return -ENXIO; 2160 2161 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 2162 irq_event->irq, irq_event->level, 2163 line_status); 2164 return 0; 2165 } 2166 2167 2168 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 2169 struct kvm_enable_cap *cap) 2170 { 2171 int r; 2172 2173 if (cap->flags) 2174 return -EINVAL; 2175 2176 switch (cap->cap) { 2177 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER 2178 case KVM_CAP_PPC_ENABLE_HCALL: { 2179 unsigned long hcall = cap->args[0]; 2180 2181 r = -EINVAL; 2182 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || 2183 cap->args[1] > 1) 2184 break; 2185 if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) 2186 break; 2187 if (cap->args[1]) 2188 set_bit(hcall / 4, kvm->arch.enabled_hcalls); 2189 else 2190 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); 2191 r = 0; 2192 break; 2193 } 2194 case KVM_CAP_PPC_SMT: { 2195 unsigned long mode = cap->args[0]; 2196 unsigned long flags = cap->args[1]; 2197 2198 r = -EINVAL; 2199 if (kvm->arch.kvm_ops->set_smt_mode) 2200 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); 2201 break; 2202 } 2203 2204 case KVM_CAP_PPC_NESTED_HV: 2205 r = -EINVAL; 2206 if (!is_kvmppc_hv_enabled(kvm) || 2207 !kvm->arch.kvm_ops->enable_nested) 2208 break; 2209 r = kvm->arch.kvm_ops->enable_nested(kvm); 2210 break; 2211 #endif 2212 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) 2213 case KVM_CAP_PPC_SECURE_GUEST: 2214 r = -EINVAL; 2215 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) 2216 break; 2217 r = kvm->arch.kvm_ops->enable_svm(kvm); 2218 break; 2219 case KVM_CAP_PPC_DAWR1: 2220 r = -EINVAL; 2221 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) 2222 break; 2223 r = kvm->arch.kvm_ops->enable_dawr1(kvm); 2224 break; 2225 #endif 2226 default: 2227 r = -EINVAL; 2228 break; 2229 } 2230 2231 return r; 2232 } 2233 2234 #ifdef CONFIG_PPC_BOOK3S_64 2235 /* 2236 * These functions check whether the underlying hardware is safe 2237 * against attacks based on observing the effects of speculatively 2238 * executed instructions, and whether it supplies instructions for 2239 * use in workarounds. The information comes from firmware, either 2240 * via the device tree on powernv platforms or from an hcall on 2241 * pseries platforms. 
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |=
				KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;

		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
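	/*
	 * Descriptive note: Book3S ioctls not handled above are forwarded
	 * to the active backend (HV or PR) through its arch_vm_ioctl hook.
	 */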
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
}
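/*
 * Illustrative sketch only (not part of this file): a backend would
 * typically pair the LPID helpers above when creating and destroying a
 * guest. The field name kvm->arch.lpid is used here purely for
 * illustration.
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid < 0)
 *		return lpid;			// -ENOMEM: none free
 *	kvm->arch.lpid = lpid;
 *	...
 *	kvmppc_free_lpid(kvm->arch.lpid);	// on VM teardown
 */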