/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
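		/*
		 * Skip vcpus that already have this request pending; for
		 * the rest, remember which physical CPU each is running on
		 * so an IPI (ack_flush) can kick it out of guest mode to
		 * notice the request.
		 */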
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
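	/*
	 * kvm->srcu protects the memslot array that kvm_unmap_hva() walks;
	 * mmu_lock serializes the mmu_notifier_seq update against the kvm
	 * page fault path.
	 */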
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
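 *
 * @dont is the slot that replaces @free (or NULL when the slot is being
 * deleted); any buffers the two slots share are left untouched.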
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/*
	 * Disallow changing a memory slot's size.
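	 * A slot may be created (!old.npages) or deleted (!npages), but an
	 * existing slot cannot be resized in place.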
	 */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/*
		 * From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *  - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *  - kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n / sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

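	/*
	 * Linear scan of the slot array; callers are expected to be inside
	 * an SRCU read-side critical section so the array cannot be freed
	 * underneath us.
	 */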
	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

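/*
 * Like gfn_to_pfn(), but translates through a memslot the caller has
 * already looked up instead of searching the slot array again.
 */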
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

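/*
 * Writes to guest memory also mark the gfn dirty, so a subsequent
 * KVM_GET_DIRTY_LOG will report the page.
 */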
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
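 *
 * Sleep until the vcpu becomes runnable again, a timer fires, or a signal
 * is pending; KVM_REQ_UNHALT tells the arch code to leave the halted state.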
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
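 *
 * A reference to the VM is taken before the vcpu fd is installed, since
 * the fd can escape to userspace immediately; the reference is dropped
 * again if fd creation fails.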
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* a NULL argp (and hence a NULL p) clears the mask */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

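/*
 * File operations for the fd returned by KVM_CREATE_VM; the final fput()
 * ends up in kvm_vm_release(), which drops the VM reference.
 */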
static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

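	/*
	 * The first VM on the system turns virtualization on for every
	 * online CPU; the count is dropped again in hardware_disable_all()
	 * when the last VM goes away.
	 */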
	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/*
 * Caller must hold slots_lock.
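 * The bus is copied, the device appended to the copy, and the new array
 * published with rcu_assign_pointer(), so readers in kvm_io_bus_write()
 * and kvm_io_bus_read() never see a half-updated bus.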
*/ 2008 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, 2009 struct kvm_io_device *dev) 2010 { 2011 struct kvm_io_bus *new_bus, *bus; 2012 2013 bus = kvm->buses[bus_idx]; 2014 if (bus->dev_count > NR_IOBUS_DEVS-1) 2015 return -ENOSPC; 2016 2017 new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); 2018 if (!new_bus) 2019 return -ENOMEM; 2020 memcpy(new_bus, bus, sizeof(struct kvm_io_bus)); 2021 new_bus->devs[new_bus->dev_count++] = dev; 2022 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2023 synchronize_srcu_expedited(&kvm->srcu); 2024 kfree(bus); 2025 2026 return 0; 2027 } 2028 2029 /* Caller must hold slots_lock. */ 2030 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 2031 struct kvm_io_device *dev) 2032 { 2033 int i, r; 2034 struct kvm_io_bus *new_bus, *bus; 2035 2036 new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); 2037 if (!new_bus) 2038 return -ENOMEM; 2039 2040 bus = kvm->buses[bus_idx]; 2041 memcpy(new_bus, bus, sizeof(struct kvm_io_bus)); 2042 2043 r = -ENOENT; 2044 for (i = 0; i < new_bus->dev_count; i++) 2045 if (new_bus->devs[i] == dev) { 2046 r = 0; 2047 new_bus->devs[i] = new_bus->devs[--new_bus->dev_count]; 2048 break; 2049 } 2050 2051 if (r) { 2052 kfree(new_bus); 2053 return r; 2054 } 2055 2056 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2057 synchronize_srcu_expedited(&kvm->srcu); 2058 kfree(bus); 2059 return r; 2060 } 2061 2062 static struct notifier_block kvm_cpu_notifier = { 2063 .notifier_call = kvm_cpu_hotplug, 2064 .priority = 20, /* must be > scheduler priority */ 2065 }; 2066 2067 static int vm_stat_get(void *_offset, u64 *val) 2068 { 2069 unsigned offset = (long)_offset; 2070 struct kvm *kvm; 2071 2072 *val = 0; 2073 spin_lock(&kvm_lock); 2074 list_for_each_entry(kvm, &vm_list, vm_list) 2075 *val += *(u32 *)((void *)kvm + offset); 2076 spin_unlock(&kvm_lock); 2077 return 0; 2078 } 2079 2080 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 2081 2082 static int vcpu_stat_get(void *_offset, u64 *val) 2083 { 2084 unsigned offset = (long)_offset; 2085 struct kvm *kvm; 2086 struct kvm_vcpu *vcpu; 2087 int i; 2088 2089 *val = 0; 2090 spin_lock(&kvm_lock); 2091 list_for_each_entry(kvm, &vm_list, vm_list) 2092 kvm_for_each_vcpu(i, vcpu, kvm) 2093 *val += *(u32 *)((void *)vcpu + offset); 2094 2095 spin_unlock(&kvm_lock); 2096 return 0; 2097 } 2098 2099 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 2100 2101 static const struct file_operations *stat_fops[] = { 2102 [KVM_STAT_VCPU] = &vcpu_stat_fops, 2103 [KVM_STAT_VM] = &vm_stat_fops, 2104 }; 2105 2106 static void kvm_init_debug(void) 2107 { 2108 struct kvm_stats_debugfs_item *p; 2109 2110 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 2111 for (p = debugfs_entries; p->name; ++p) 2112 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 2113 (void *)(long)p->offset, 2114 stat_fops[p->kind]); 2115 } 2116 2117 static void kvm_exit_debug(void) 2118 { 2119 struct kvm_stats_debugfs_item *p; 2120 2121 for (p = debugfs_entries; p->name; ++p) 2122 debugfs_remove(p->dentry); 2123 debugfs_remove(kvm_debugfs_dir); 2124 } 2125 2126 static int kvm_suspend(struct sys_device *dev, pm_message_t state) 2127 { 2128 if (kvm_usage_count) 2129 hardware_disable(NULL); 2130 return 0; 2131 } 2132 2133 static int kvm_resume(struct sys_device *dev) 2134 { 2135 if (kvm_usage_count) 2136 hardware_enable(NULL); 2137 return 0; 2138 } 2139 2140 static struct sysdev_class kvm_sysdev_class = { 2141 .name = "kvm", 2142 .suspend = 
static struct sysdev_class kvm_sysdev_class = {
	.name    = "kvm",
	.suspend = kvm_suspend,
	.resume  = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id  = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/*
	 * A kmem cache lets us meet the alignment requirements of fx_save.
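	 * (fxsave/fxrstor need a 16-byte-aligned buffer, which a plain
	 * kmalloc() of the vcpu structure would not guarantee.)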
	 */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);