/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
    return !!(v->arch.pending_exceptions) ||
           v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
    return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; the function hard-disables them itself and re-enables them
 * on the return-to-host path (note the WARN_ON below fires if they are
 * already disabled on entry).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
    int r;

    WARN_ON(irqs_disabled());
    hard_irq_disable();

    while (true) {
        if (need_resched()) {
            local_irq_enable();
            cond_resched();
            hard_irq_disable();
            continue;
        }

        if (signal_pending(current)) {
            kvmppc_account_exit(vcpu, SIGNAL_EXITS);
            vcpu->run->exit_reason = KVM_EXIT_INTR;
            r = -EINTR;
            break;
        }

        vcpu->mode = IN_GUEST_MODE;

        /*
         * Reading vcpu->requests must happen after setting vcpu->mode,
         * so we don't miss a request because the requester sees
         * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
         * before next entering the guest (and thus doesn't IPI).
         */
        smp_mb();

        if (vcpu->requests) {
            /* Make sure we process requests with preemption enabled */
            local_irq_enable();
            trace_kvm_check_requests(vcpu);
            r = kvmppc_core_check_requests(vcpu);
            hard_irq_disable();
            if (r > 0)
                continue;
            break;
        }

        if (kvmppc_core_prepare_to_enter(vcpu)) {
            /* interrupts got enabled in between, so we
               are back at square 1 */
            continue;
        }

        __kvm_guest_enter();
        return 1;
    }

    /* return to host */
    local_irq_enable();
    return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
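/*
 * The vcpu's shared (magic) page is kept in whatever endianness the
 * guest last negotiated.  When the guest switches endianness, every
 * field of the page must be byteswapped in place so that both sides
 * keep agreeing on its contents; kvmppc_kvm_pv() below does exactly
 * that when the magic page is (re)mapped.
 */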
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
    struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
    int i;

    shared->sprg0 = swab64(shared->sprg0);
    shared->sprg1 = swab64(shared->sprg1);
    shared->sprg2 = swab64(shared->sprg2);
    shared->sprg3 = swab64(shared->sprg3);
    shared->srr0 = swab64(shared->srr0);
    shared->srr1 = swab64(shared->srr1);
    shared->dar = swab64(shared->dar);
    shared->msr = swab64(shared->msr);
    shared->dsisr = swab32(shared->dsisr);
    shared->int_pending = swab32(shared->int_pending);
    for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
        shared->sr[i] = swab32(shared->sr[i]);
}
#endif
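/*
 * Handle a paravirtual hypercall from the guest: the hypercall number
 * arrives in r11 and up to four parameters in r3-r6.  The primary
 * return code is this function's return value; a second return value,
 * where one is defined, is placed in the guest's r4 before resuming.
 */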
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
    int nr = kvmppc_get_gpr(vcpu, 11);
    int r;
    unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
    unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
    unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
    unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
    unsigned long r2 = 0;

    if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
        /* 32 bit mode */
        param1 &= 0xffffffff;
        param2 &= 0xffffffff;
        param3 &= 0xffffffff;
        param4 &= 0xffffffff;
    }

    switch (nr) {
    case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
    {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Book3S can be little endian, find it out here */
        int shared_big_endian = true;
        if (vcpu->arch.intr_msr & MSR_LE)
            shared_big_endian = false;
        if (shared_big_endian != vcpu->arch.shared_big_endian)
            kvmppc_swab_shared(vcpu);
        vcpu->arch.shared_big_endian = shared_big_endian;
#endif

        if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
            /*
             * Older versions of the Linux magic page code had
             * a bug where they would map their trampoline code
             * NX. If that's the case, remove !PR NX capability.
             */
            vcpu->arch.disable_kernel_nx = true;
            kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }

        vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
        vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Make sure our 4k magic page is in the same window of a 64k
         * page within the guest and within the host's page.
         */
        if ((vcpu->arch.magic_page_pa & 0xf000) !=
            ((ulong)vcpu->arch.shared & 0xf000)) {
            void *old_shared = vcpu->arch.shared;
            ulong shared = (ulong)vcpu->arch.shared;
            void *new_shared;

            shared &= PAGE_MASK;
            shared |= vcpu->arch.magic_page_pa & 0xf000;
            new_shared = (void *)shared;
            memcpy(new_shared, old_shared, 0x1000);
            vcpu->arch.shared = new_shared;
        }
#endif

        r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

        r = EV_SUCCESS;
        break;
    }
    case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
        r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
        r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

        /* Second return value is in r4 */
        break;
    case EV_HCALL_TOKEN(EV_IDLE):
        r = EV_SUCCESS;
        kvm_vcpu_block(vcpu);
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
        break;
    default:
        r = EV_UNIMPLEMENTED;
        break;
    }

    kvmppc_set_gpr(vcpu, 4, r2);

    return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
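/*
 * Validate the vcpu's configuration before it is allowed to run: a PVR
 * must be set, PAPR mode is only available on Book3S-64, and HV KVM
 * currently requires PAPR mode.  The verdict is cached in arch.sane
 * and 0 / -EINVAL returned to the caller.
 */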
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
    int r = false;

    /* We have to know what CPU to virtualize */
    if (!vcpu->arch.pvr)
        goto out;

    /* PAPR only works with book3s_64 */
    if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
        goto out;

    /* HV KVM can only do PAPR mode for now */
    if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
        goto out;

#ifdef CONFIG_KVM_BOOKE_HV
    if (!cpu_has_feature(CPU_FTR_EMB_HV))
        goto out;
#endif

    r = true;

out:
    vcpu->arch.sane = r;
    return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er;
    int r;

    er = kvmppc_emulate_loadstore(vcpu);
    switch (er) {
    case EMULATE_DONE:
        /* Future optimization: only reload non-volatiles if they were
         * actually modified. */
        r = RESUME_GUEST_NV;
        break;
    case EMULATE_AGAIN:
        r = RESUME_GUEST;
        break;
    case EMULATE_DO_MMIO:
        run->exit_reason = KVM_EXIT_MMIO;
        /* We must reload nonvolatiles because "update" load/store
         * instructions modify register state. */
        /* Future optimization: only reload non-volatiles if they were
         * actually modified. */
        r = RESUME_HOST_NV;
        break;
    case EMULATE_FAIL:
    {
        u32 last_inst;

        kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
        /* XXX Deliver Program interrupt to guest. */
        pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
        r = RESUME_HOST;
        break;
    }
    default:
        WARN_ON(1);
        r = RESUME_GUEST;
    }

    return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
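/*
 * kvmppc_st()/kvmppc_ld() perform a guest store/load on the host's
 * behalf: the effective address is translated via kvmppc_xlate() and
 * checked against the PTE's protection bits.  Accesses that hit the
 * magic page are serviced straight from the shared page; everything
 * else goes through kvm_write_guest()/kvm_read_guest(), and
 * EMULATE_DO_MMIO is returned when the address isn't backed by memory.
 */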
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
    ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
    struct kvmppc_pte pte;
    int r;

    vcpu->stat.st++;

    r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                     XLATE_WRITE, &pte);
    if (r < 0)
        return r;

    *eaddr = pte.raddr;

    if (!pte.may_write)
        return -EPERM;

    /* Magic page override */
    if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
        ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
        !(kvmppc_get_msr(vcpu) & MSR_PR)) {
        void *magic = vcpu->arch.shared;
        magic += pte.eaddr & 0xfff;
        memcpy(magic, ptr, size);
        return EMULATE_DONE;
    }

    if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
        return EMULATE_DO_MMIO;

    return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
    ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
    struct kvmppc_pte pte;
    int rc;

    vcpu->stat.ld++;

    rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
                      XLATE_READ, &pte);
    if (rc)
        return rc;

    *eaddr = pte.raddr;

    if (!pte.may_read)
        return -EPERM;

    if (!data && !pte.may_execute)
        return -ENOEXEC;

    /* Magic page override */
    if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
        ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
        !(kvmppc_get_msr(vcpu) & MSR_PR)) {
        void *magic = vcpu->arch.shared;
        magic += pte.eaddr & 0xfff;
        memcpy(ptr, magic, size);
        return EMULATE_DONE;
    }

    if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
        return EMULATE_DO_MMIO;

    return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
    return 0;
}

int kvm_arch_hardware_setup(void)
{
    return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
    *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
    struct kvmppc_ops *kvm_ops = NULL;
    /*
     * if we have both HV and PR enabled, default is HV
     */
    if (type == 0) {
        if (kvmppc_hv_ops)
            kvm_ops = kvmppc_hv_ops;
        else
            kvm_ops = kvmppc_pr_ops;
        if (!kvm_ops)
            goto err_out;
    } else if (type == KVM_VM_PPC_HV) {
        if (!kvmppc_hv_ops)
            goto err_out;
        kvm_ops = kvmppc_hv_ops;
    } else if (type == KVM_VM_PPC_PR) {
        if (!kvmppc_pr_ops)
            goto err_out;
        kvm_ops = kvmppc_pr_ops;
    } else
        goto err_out;

    if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
        return -ENOENT;

    kvm->arch.kvm_ops = kvm_ops;
    return kvmppc_core_init_vm(kvm);
err_out:
    return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
    unsigned int i;
    struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
    /*
     * We call kick_all_cpus_sync() to ensure that all
     * CPUs have executed any pending IPIs before we
     * continue and free the vcpu structures below.
     */
    if (is_kvmppc_hv_enabled(kvm))
        kick_all_cpus_sync();
#endif

    kvm_for_each_vcpu(i, vcpu, kvm)
        kvm_arch_vcpu_free(vcpu);

    mutex_lock(&kvm->lock);
    for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
        kvm->vcpus[i] = NULL;

    atomic_set(&kvm->online_vcpus, 0);

    kvmppc_core_destroy_vm(kvm);

    mutex_unlock(&kvm->lock);

    /* drop the module reference */
    module_put(kvm->arch.kvm_ops->owner);
}
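/*
 * Report whether a KVM capability is supported.  Many of the answers
 * depend on whether the VM runs in HV or PR mode, so when no VM is
 * supplied we guess from which backend module is loaded.
 */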
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
    int r;
    /* Assume we're using HV mode when the HV module is loaded */
    int hv_enabled = kvmppc_hv_ops ? 1 : 0;

    if (kvm) {
        /*
         * Hooray - we know which VM type we're running on. Depend on
         * that rather than the guess above.
         */
        hv_enabled = is_kvmppc_hv_enabled(kvm);
    }

    switch (ext) {
#ifdef CONFIG_BOOKE
    case KVM_CAP_PPC_BOOKE_SREGS:
    case KVM_CAP_PPC_BOOKE_WATCHDOG:
    case KVM_CAP_PPC_EPR:
#else
    case KVM_CAP_PPC_SEGSTATE:
    case KVM_CAP_PPC_HIOR:
    case KVM_CAP_PPC_PAPR:
#endif
    case KVM_CAP_PPC_UNSET_IRQ:
    case KVM_CAP_PPC_IRQ_LEVEL:
    case KVM_CAP_ENABLE_CAP:
    case KVM_CAP_ENABLE_CAP_VM:
    case KVM_CAP_ONE_REG:
    case KVM_CAP_IOEVENTFD:
    case KVM_CAP_DEVICE_CTRL:
        r = 1;
        break;
    case KVM_CAP_PPC_PAIRED_SINGLES:
    case KVM_CAP_PPC_OSI:
    case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
    case KVM_CAP_SW_TLB:
#endif
        /* We support this only for PR */
        r = !hv_enabled;
        break;
#ifdef CONFIG_KVM_MMIO
    case KVM_CAP_COALESCED_MMIO:
        r = KVM_COALESCED_MMIO_PAGE_OFFSET;
        break;
#endif
#ifdef CONFIG_KVM_MPIC
    case KVM_CAP_IRQ_MPIC:
        r = 1;
        break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
    case KVM_CAP_SPAPR_TCE:
    case KVM_CAP_SPAPR_TCE_64:
    case KVM_CAP_PPC_ALLOC_HTAB:
    case KVM_CAP_PPC_RTAS:
    case KVM_CAP_PPC_FIXUP_HCALL:
    case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
    case KVM_CAP_IRQ_XICS:
#endif
        r = 1;
        break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    case KVM_CAP_PPC_SMT:
        if (hv_enabled)
            r = threads_per_subcore;
        else
            r = 0;
        break;
    case KVM_CAP_PPC_RMA:
        r = 0;
        break;
    case KVM_CAP_PPC_HWRNG:
        r = kvmppc_hwrng_present();
        break;
#endif
    case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        r = 1;
#else
        r = 0;
#endif
        break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    case KVM_CAP_PPC_HTAB_FD:
        r = hv_enabled;
        break;
#endif
    case KVM_CAP_NR_VCPUS:
        /*
         * Recommending a number of CPUs is somewhat arbitrary; we
         * return the number of present CPUs for -HV (since a host
         * will have secondary threads "offline"), and for other KVM
         * implementations just count online CPUs.
         */
        if (hv_enabled)
            r = num_present_cpus();
        else
            r = num_online_cpus();
        break;
    case KVM_CAP_NR_MEMSLOTS:
        r = KVM_USER_MEM_SLOTS;
        break;
    case KVM_CAP_MAX_VCPUS:
        r = KVM_MAX_VCPUS;
        break;
#ifdef CONFIG_PPC_BOOK3S_64
    case KVM_CAP_PPC_GET_SMMU_INFO:
        r = 1;
        break;
    case KVM_CAP_SPAPR_MULTITCE:
        r = 1;
        break;
#endif
    default:
        r = 0;
        break;
    }
    return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
    return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
    kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
    return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
    return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   const struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
    kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
    kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
    struct kvm_vcpu *vcpu;
    vcpu = kvmppc_core_vcpu_create(kvm, id);
    if (!IS_ERR(vcpu)) {
        vcpu->arch.wqp = &vcpu->wq;
        kvmppc_create_vcpu_debugfs(vcpu, id);
    }
    return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
    /* Make sure we're not using the vcpu anymore */
    hrtimer_cancel(&vcpu->arch.dec_timer);

    kvmppc_remove_vcpu_debugfs(vcpu);

    switch (vcpu->arch.irq_type) {
    case KVMPPC_IRQ_MPIC:
        kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
        break;
    case KVMPPC_IRQ_XICS:
        kvmppc_xics_free_icp(vcpu);
        break;
    }

    kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
    kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
    return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
    struct kvm_vcpu *vcpu;

    vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
    kvmppc_decrementer_func(vcpu);

    return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
    int ret;

    hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
    vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
    vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
    mutex_init(&vcpu->arch.exit_timing_lock);
#endif
    ret = kvmppc_subarch_vcpu_init(vcpu);
    return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
    kvmppc_mmu_destroy(vcpu);
    kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
    /*
     * vrsave (formerly usprg0) isn't used by Linux, but may
     * be used by the guest.
     *
     * On non-booke this is associated with Altivec and
     * is handled by code in book3s.c.
     */
    mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
    kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
    kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
    vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
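/*
 * Complete an MMIO load once run->mmio.data has been filled in (by
 * userspace or the in-kernel io bus): byteswap if host and guest
 * endianness differ, sign-extend if the emulated instruction asked for
 * it, then write the result into the register the instruction targeted.
 */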
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
    u64 uninitialized_var(gpr);

    if (run->mmio.len > sizeof(gpr)) {
        printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
        return;
    }

    if (!vcpu->arch.mmio_host_swabbed) {
        switch (run->mmio.len) {
        case 8: gpr = *(u64 *)run->mmio.data; break;
        case 4: gpr = *(u32 *)run->mmio.data; break;
        case 2: gpr = *(u16 *)run->mmio.data; break;
        case 1: gpr = *(u8 *)run->mmio.data; break;
        }
    } else {
        switch (run->mmio.len) {
        case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
        case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
        case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
        case 1: gpr = *(u8 *)run->mmio.data; break;
        }
    }

    if (vcpu->arch.mmio_sign_extend) {
        switch (run->mmio.len) {
#ifdef CONFIG_PPC64
        case 4:
            gpr = (s64)(s32)gpr;
            break;
#endif
        case 2:
            gpr = (s64)(s16)gpr;
            break;
        case 1:
            gpr = (s64)(s8)gpr;
            break;
        }
    }

    switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
    case KVM_MMIO_REG_GPR:
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
        break;
    case KVM_MMIO_REG_FPR:
        VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
        break;
#ifdef CONFIG_PPC_BOOK3S
    case KVM_MMIO_REG_QPR:
        vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
        break;
    case KVM_MMIO_REG_FQPR:
        VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
        vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
        break;
#endif
    default:
        BUG();
    }
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
    int idx, ret;
    bool host_swabbed;

    /* Pity C doesn't have a logical XOR operator */
    if (kvmppc_need_byteswap(vcpu)) {
        host_swabbed = is_default_endian;
    } else {
        host_swabbed = !is_default_endian;
    }

    if (bytes > sizeof(run->mmio.data)) {
        printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
               bytes);
    }

    run->mmio.phys_addr = vcpu->arch.paddr_accessed;
    run->mmio.len = bytes;
    run->mmio.is_write = 0;

    vcpu->arch.io_gpr = rt;
    vcpu->arch.mmio_host_swabbed = host_swabbed;
    vcpu->mmio_needed = 1;
    vcpu->mmio_is_write = 0;
    vcpu->arch.mmio_sign_extend = 0;

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                          bytes, &run->mmio.data);

    srcu_read_unlock(&vcpu->kvm->srcu, idx);

    if (!ret) {
        kvmppc_complete_mmio_load(vcpu, run);
        vcpu->mmio_needed = 0;
        return EMULATE_DONE;
    }

    return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
    int r;

    vcpu->arch.mmio_sign_extend = 1;
    r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

    return r;
}
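/*
 * Emulate an MMIO store: stage the value in run->mmio.data in the byte
 * order the device expects, then try the in-kernel io bus; if nothing
 * claims the address, EMULATE_DO_MMIO hands the access to userspace.
 */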
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
    void *data = run->mmio.data;
    int idx, ret;
    bool host_swabbed;

    /* Pity C doesn't have a logical XOR operator */
    if (kvmppc_need_byteswap(vcpu)) {
        host_swabbed = is_default_endian;
    } else {
        host_swabbed = !is_default_endian;
    }

    if (bytes > sizeof(run->mmio.data)) {
        printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
               bytes);
    }

    run->mmio.phys_addr = vcpu->arch.paddr_accessed;
    run->mmio.len = bytes;
    run->mmio.is_write = 1;
    vcpu->mmio_needed = 1;
    vcpu->mmio_is_write = 1;

    /* Store the value at the lowest bytes in 'data'. */
    if (!host_swabbed) {
        switch (bytes) {
        case 8: *(u64 *)data = val; break;
        case 4: *(u32 *)data = val; break;
        case 2: *(u16 *)data = val; break;
        case 1: *(u8 *)data = val; break;
        }
    } else {
        switch (bytes) {
        case 8: *(u64 *)data = swab64(val); break;
        case 4: *(u32 *)data = swab32(val); break;
        case 2: *(u16 *)data = swab16(val); break;
        case 1: *(u8 *)data = val; break;
        }
    }

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                           bytes, &run->mmio.data);

    srcu_read_unlock(&vcpu->kvm->srcu, idx);

    if (!ret) {
        vcpu->mmio_needed = 0;
        return EMULATE_DONE;
    }

    return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
    int r = 0;
    union kvmppc_one_reg val;
    int size;

    size = one_reg_size(reg->id);
    if (size > sizeof(val))
        return -EINVAL;

    r = kvmppc_get_one_reg(vcpu, reg->id, &val);
    if (r == -EINVAL) {
        r = 0;
        switch (reg->id) {
#ifdef CONFIG_ALTIVEC
        case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
            break;
        case KVM_REG_PPC_VSCR:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
            break;
        case KVM_REG_PPC_VRSAVE:
            val = get_reg_val(reg->id, vcpu->arch.vrsave);
            break;
#endif /* CONFIG_ALTIVEC */
        default:
            r = -EINVAL;
            break;
        }
    }

    if (r)
        return r;

    if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
        r = -EFAULT;

    return r;
}
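/*
 * Mirror image of the get path above: registers the subarch does not
 * handle (it returns -EINVAL) are serviced here, currently just the
 * Altivec state.
 */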
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
    int r;
    union kvmppc_one_reg val;
    int size;

    size = one_reg_size(reg->id);
    if (size > sizeof(val))
        return -EINVAL;

    if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
        return -EFAULT;

    r = kvmppc_set_one_reg(vcpu, reg->id, &val);
    if (r == -EINVAL) {
        r = 0;
        switch (reg->id) {
#ifdef CONFIG_ALTIVEC
        case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
            break;
        case KVM_REG_PPC_VSCR:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
            break;
        case KVM_REG_PPC_VRSAVE:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            vcpu->arch.vrsave = set_reg_val(reg->id, val);
            break;
#endif /* CONFIG_ALTIVEC */
        default:
            r = -EINVAL;
            break;
        }
    }

    return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
    int r;
    sigset_t sigsaved;

    if (vcpu->sigset_active)
        sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

    if (vcpu->mmio_needed) {
        if (!vcpu->mmio_is_write)
            kvmppc_complete_mmio_load(vcpu, run);
        vcpu->mmio_needed = 0;
    } else if (vcpu->arch.osi_needed) {
        u64 *gprs = run->osi.gprs;
        int i;

        for (i = 0; i < 32; i++)
            kvmppc_set_gpr(vcpu, i, gprs[i]);
        vcpu->arch.osi_needed = 0;
    } else if (vcpu->arch.hcall_needed) {
        int i;

        kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
        for (i = 0; i < 9; ++i)
            kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
        vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
    } else if (vcpu->arch.epr_needed) {
        kvmppc_set_epr(vcpu, run->epr.epr);
        vcpu->arch.epr_needed = 0;
#endif
    }

    r = kvmppc_vcpu_run(run, vcpu);

    if (vcpu->sigset_active)
        sigprocmask(SIG_SETMASK, &sigsaved, NULL);

    return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
    if (irq->irq == KVM_INTERRUPT_UNSET) {
        kvmppc_core_dequeue_external(vcpu);
        return 0;
    }

    kvmppc_core_queue_external(vcpu, irq);

    kvm_vcpu_kick(vcpu);

    return 0;
}
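/*
 * Enable an optional, userspace-negotiated capability on one vcpu.
 * A successful enable re-runs kvmppc_sanity_check(), since several of
 * these capabilities change which vcpu configurations are valid.
 */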
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
    int r;

    if (cap->flags)
        return -EINVAL;

    switch (cap->cap) {
    case KVM_CAP_PPC_OSI:
        r = 0;
        vcpu->arch.osi_enabled = true;
        break;
    case KVM_CAP_PPC_PAPR:
        r = 0;
        vcpu->arch.papr_enabled = true;
        break;
    case KVM_CAP_PPC_EPR:
        r = 0;
        if (cap->args[0])
            vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
        else
            vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
        break;
#ifdef CONFIG_BOOKE
    case KVM_CAP_PPC_BOOKE_WATCHDOG:
        r = 0;
        vcpu->arch.watchdog_enabled = true;
        break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
    case KVM_CAP_SW_TLB: {
        struct kvm_config_tlb cfg;
        void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

        r = -EFAULT;
        if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
            break;

        r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
        break;
    }
#endif
#ifdef CONFIG_KVM_MPIC
    case KVM_CAP_IRQ_MPIC: {
        struct fd f;
        struct kvm_device *dev;

        r = -EBADF;
        f = fdget(cap->args[0]);
        if (!f.file)
            break;

        r = -EPERM;
        dev = kvm_device_from_filp(f.file);
        if (dev)
            r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

        fdput(f);
        break;
    }
#endif
#ifdef CONFIG_KVM_XICS
    case KVM_CAP_IRQ_XICS: {
        struct fd f;
        struct kvm_device *dev;

        r = -EBADF;
        f = fdget(cap->args[0]);
        if (!f.file)
            break;

        r = -EPERM;
        dev = kvm_device_from_filp(f.file);
        if (dev)
            r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

        fdput(f);
        break;
    }
#endif /* CONFIG_KVM_XICS */
    default:
        r = -EINVAL;
        break;
    }

    if (!r)
        r = kvmppc_sanity_check(vcpu);

    return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
    return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
    return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
    struct kvm_vcpu *vcpu = filp->private_data;
    void __user *argp = (void __user *)arg;
    long r;

    switch (ioctl) {
    case KVM_INTERRUPT: {
        struct kvm_interrupt irq;
        r = -EFAULT;
        if (copy_from_user(&irq, argp, sizeof(irq)))
            goto out;
        r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        goto out;
    }

    case KVM_ENABLE_CAP:
    {
        struct kvm_enable_cap cap;
        r = -EFAULT;
        if (copy_from_user(&cap, argp, sizeof(cap)))
            goto out;
        r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
        break;
    }

    case KVM_SET_ONE_REG:
    case KVM_GET_ONE_REG:
    {
        struct kvm_one_reg reg;
        r = -EFAULT;
        if (copy_from_user(&reg, argp, sizeof(reg)))
            goto out;
        if (ioctl == KVM_SET_ONE_REG)
            r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
        else
            r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
        break;
    }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
    case KVM_DIRTY_TLB: {
        struct kvm_dirty_tlb dirty;
        r = -EFAULT;
        if (copy_from_user(&dirty, argp, sizeof(dirty)))
            goto out;
        r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
        break;
    }
#endif
    default:
        r = -EINVAL;
    }

out:
    return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
    return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
    u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
    u32 inst_sc1 = 0x44000022;
    pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
    pvinfo->hcall[1] = cpu_to_be32(inst_nop);
    pvinfo->hcall[2] = cpu_to_be32(inst_nop);
    pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
    u32 inst_lis = 0x3c000000;
    u32 inst_ori = 0x60000000;
    u32 inst_sc = 0x44000002;
    u32 inst_imm_mask = 0xffff;

    /*
     * The hypercall to get into KVM from within guest context is as
     * follows:
     *
     *    lis r0, KVM_SC_MAGIC_R0@h
     *    ori r0, r0, KVM_SC_MAGIC_R0@l
     *    sc
     *    nop
     */
    pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
    pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
    pvinfo->hcall[2] = cpu_to_be32(inst_sc);
    pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

    pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

    return 0;
}
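/*
 * Assert or deassert an interrupt line on an in-kernel irqchip; when no
 * irqchip is emulated in the kernel, -ENXIO tells userspace to handle
 * the line itself.
 */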
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
    if (!irqchip_in_kernel(kvm))
        return -ENXIO;

    irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                    irq_event->irq, irq_event->level,
                                    line_status);
    return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                                   struct kvm_enable_cap *cap)
{
    int r;

    if (cap->flags)
        return -EINVAL;

    switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
    case KVM_CAP_PPC_ENABLE_HCALL: {
        unsigned long hcall = cap->args[0];

        r = -EINVAL;
        if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
            cap->args[1] > 1)
            break;
        if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
            break;
        if (cap->args[1])
            set_bit(hcall / 4, kvm->arch.enabled_hcalls);
        else
            clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
        r = 0;
        break;
    }
#endif
    default:
        r = -EINVAL;
        break;
    }

    return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
    struct kvm *kvm __maybe_unused = filp->private_data;
    void __user *argp = (void __user *)arg;
    long r;

    switch (ioctl) {
    case KVM_PPC_GET_PVINFO: {
        struct kvm_ppc_pvinfo pvinfo;
        memset(&pvinfo, 0, sizeof(pvinfo));
        r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
        if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
            r = -EFAULT;
            goto out;
        }

        break;
    }
    case KVM_ENABLE_CAP:
    {
        struct kvm_enable_cap cap;
        r = -EFAULT;
        if (copy_from_user(&cap, argp, sizeof(cap)))
            goto out;
        r = kvm_vm_ioctl_enable_cap(kvm, &cap);
        break;
    }
#ifdef CONFIG_PPC_BOOK3S_64
    case KVM_CREATE_SPAPR_TCE_64: {
        struct kvm_create_spapr_tce_64 create_tce_64;

        r = -EFAULT;
        if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
            goto out;
        if (create_tce_64.flags) {
            r = -EINVAL;
            goto out;
        }
        r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
        goto out;
    }
    case KVM_CREATE_SPAPR_TCE: {
        struct kvm_create_spapr_tce create_tce;
        struct kvm_create_spapr_tce_64 create_tce_64;

        r = -EFAULT;
        if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
            goto out;

        create_tce_64.liobn = create_tce.liobn;
        create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
        create_tce_64.offset = 0;
        create_tce_64.size = create_tce.window_size >>
                             IOMMU_PAGE_SHIFT_4K;
        create_tce_64.flags = 0;
        r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
        goto out;
    }
    case KVM_PPC_GET_SMMU_INFO: {
        struct kvm_ppc_smmu_info info;
        struct kvm *kvm = filp->private_data;

        memset(&info, 0, sizeof(info));
        r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
        if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
            r = -EFAULT;
        break;
    }
    case KVM_PPC_RTAS_DEFINE_TOKEN: {
        struct kvm *kvm = filp->private_data;

        r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
        break;
    }
    default: {
        struct kvm *kvm = filp->private_data;
        r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
    }
#else /* CONFIG_PPC_BOOK3S_64 */
    default:
        r = -ENOTTY;
#endif
    }
out:
    return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
    long lpid;

    do {
        lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
        if (lpid >= nr_lpids) {
            pr_err("%s: No LPIDs free\n", __func__);
            return -ENOMEM;
        }
    } while (test_and_set_bit(lpid, lpid_inuse));

    return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
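/*
 * LPIDs (logical partition IDs) identify a guest to the hardware; they
 * are handed out from the small bitmap above.  kvmppc_claim_lpid()
 * reserves an LPID that was assigned outside this allocator, and
 * kvmppc_init_lpid() caps the pool at what the platform supports.
 */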
void kvmppc_claim_lpid(long lpid)
{
    set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
    clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
    nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
    memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
    return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);