/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
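	/*
	 * Mark the request pending on every vcpu, and collect the CPUs
	 * that are currently running a vcpu (other than this one) so
	 * that they can all be kicked with one round of IPIs below.
	 */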
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
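
/*
 * For reference, the page-fault side pairs with the hooks around this
 * point roughly as follows (a sketch of the consumer, cf. the x86
 * mmu_notifier_retry() helper; the details live in the arch code):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep, no locks held)
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != mmu_seq)
 *		goto retry;		(an invalidate ran in between)
 *	...establish the spte...
 */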

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
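
/*
 * Userspace reaches __kvm_set_memory_region() below through the
 * KVM_SET_USER_MEMORY_REGION vm ioctl; an illustrative sketch:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,		(page aligned)
 *		.userspace_addr  = (__u64)buf,		(page aligned)
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * Passing memory_size == 0 deletes the slot.
 */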

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * 	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
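
/*
 * The memslot lookups below do a linear scan of an rcu_dereference()d
 * kvm->memslots.  Callers must be inside an srcu read-side section on
 * kvm->srcu (or hold slots_lock), since __kvm_set_memory_region()
 * frees the old array right after synchronize_srcu_expedited().
 */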

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
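
/*
 * A typical caller pairs the gfn_to_pfn* helpers with the release
 * helpers further down, e.g. (illustrative):
 *
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;
 *	...access the page...
 *	kvm_release_pfn_clean(pfn);	(or _dirty() after a write)
 */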

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);
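
/*
 * The write-side helpers below mirror the readers above, with one
 * addition: every page successfully written is marked dirty via
 * mark_page_dirty() so that KVM_GET_DIRTY_LOG picks it up.
 */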

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
		unsigned long *p = memslot->dirty_bitmap +
					rel_gfn / BITS_PER_LONG;
		int offset = rel_gfn % BITS_PER_LONG;

		/* avoid RMW */
		if (!generic_test_le_bit(offset, p))
			generic___set_le_bit(offset, p);
	}
}
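
/*
 * Layout note: the dirty bitmap uses little-endian bit numbering (the
 * generic_*_le_bit helpers above), i.e. the bit for a gfn lives at word
 * rel_gfn / BITS_PER_LONG, bit rel_gfn % BITS_PER_LONG, so the buffer
 * copied out by KVM_GET_DIRTY_LOG has the same layout on all
 * architectures.
 */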

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}
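
/*
 * Userspace view of the vcpu fd created below, as an illustrative
 * sketch (page 0 of the mapping is the vcpu's struct kvm_run):
 *
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);
 *	size    = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run     = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED,
 *		       vcpu_fd, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */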

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass NULL when no mask was supplied, not an uninitialized sigset */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}
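
/*
 * Note that mmap() on a vm fd maps guest memory directly: the fault
 * handler above treats the page offset as a gfn and pins the backing
 * page with get_user_pages(), letting userspace peek at guest physical
 * memory without knowing the slot's userspace_addr.
 */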

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}
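
/*
 * Hardware virtualization extensions are reference counted: creating
 * the first VM enables them on every online CPU, and destroying the
 * last VM disables them again, so VMX/SVM is not held while no guests
 * exist.
 */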

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
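
/*
 * A bus is published with RCU semantics: the readers above fetch it
 * with rcu_dereference() from within an srcu read-side section, while
 * the two writers below copy the bus, publish the copy with
 * rcu_assign_pointer() and wait out readers with
 * synchronize_srcu_expedited() before freeing the old one.
 */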

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name    = "kvm",
	.suspend = kvm_suspend,
	.resume  = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id  = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
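
/*
 * kvm_init() is called from the arch module's init; on x86, for
 * example, vmx.c does roughly (illustrative):
 *
 *	kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 *
 * @opaque is passed through to kvm_arch_init() and @vcpu_size sizes
 * the vcpu kmem cache created below.
 */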

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);