/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;

static struct page *fault_page;
static pfn_t fault_pfn;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

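/*
 * Set @req pending on every vcpu of @kvm and IPI the CPUs that are
 * currently running a vcpu, waiting until they have acknowledged the
 * interrupt (ack_flush is an empty function run on each target CPU).
 * Returns true if at least one CPU had to be signalled.
 */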
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (kvm_make_check_request(req, vcpu))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

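/*
 * How mmu_notifier_seq and mmu_notifier_count are consumed (illustrative
 * sketch only; the real consumer is the arch page fault path, which does
 * not live in this file):
 *
 *	seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		(may sleep)
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count ||
 *	    kvm->mmu_notifier_seq != seq)
 *		goto retry;			(page may be on its way out)
 *	... install the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */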
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	/* kfree(NULL) is a no-op, so unwinding partial allocations is safe */
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(*new.rmap));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + ((base_gfn + npages - 1)
			      >> KVM_HPAGE_GFN_SHIFT(level));
		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

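/*
 * For reference, the userspace side of the ioctl handled above looks
 * roughly like this (illustrative sketch; vm_fd is a VM file descriptor
 * obtained via KVM_CREATE_VM, and both the size and the backing memory
 * must be page-aligned):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,		(or KVM_MEM_LOG_DIRTY_PAGES)
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = mem_size,
 *		.userspace_addr  = (__u64)mmap(NULL, mem_size,
 *					PROT_READ | PROT_WRITE,
 *					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */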
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page || page == hwpoison_page || page == fault_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

int is_hwpoison_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

int is_fault_pfn(pfn_t pfn)
{
	return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_fault_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = NULL;

	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return gfn_to_hva_memslot(slot, gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		if (is_hwpoison_address(addr)) {
			up_read(&current->mm->mmap_sem);
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(fault_page);
			return page_to_pfn(fault_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

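/*
 * gfn_to_pfn() and gfn_to_page() return with a reference held on the
 * underlying page (error pages included); MMIO pfns are the exception,
 * which is why the helpers below check kvm_is_mmio_pfn() before touching
 * the struct page.  Callers pair a lookup with kvm_release_pfn_clean(),
 * or kvm_release_pfn_dirty() when they may have written through the
 * mapping.
 */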
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

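/*
 * The read/write helpers above and below copy in at most page-sized
 * segments (see next_segment()), so callers may pass buffers that cross
 * guest page boundaries.  A typical in-kernel use, as a sketch only
 * (struct layout is a hypothetical guest-resident structure):
 *
 *	struct layout l;
 *	if (kvm_read_guest(kvm, gpa, &l, sizeof(l)))
 *		return -EFAULT;		(bad hva, or userspace copy faulted)
 */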
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

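/*
 * Bits set above are harvested by userspace through KVM_GET_DIRTY_LOG
 * (see kvm_vm_ioctl() below).  The userspace side, roughly (illustrative
 * sketch; the slot must have been created with KVM_MEM_LOG_DIRTY_PAGES,
 * and bitmap must hold one bit per page in the slot):
 *
 *	struct kvm_dirty_log log = {
 *		.slot         = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */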
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

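/*
 * Userspace reaches a vcpu through the fd returned above, then maps the
 * shared kvm_run area and enters the guest (illustrative sketch; kvm_fd
 * is the /dev/kvm descriptor, vm_fd a VM descriptor):
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */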
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break them.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

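/*
 * KVM_IRQFD and KVM_IOEVENTFD above let userspace wire eventfds to guest
 * interrupts and to MMIO/PIO writes.  For example (illustrative sketch
 * only; the GSI value is arbitrary):
 *
 *	struct kvm_irqfd irqfd = {
 *		.fd  = eventfd(0, 0),
 *		.gsi = 10,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 * Signalling the eventfd then injects GSI 10 into the guest.
 */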
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
	.llseek		= noop_llseek,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

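/*
 * The device ioctls handled below are the entry point to everything
 * above.  A minimal client, as an illustrative sketch:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);
 *	if (!ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY))
 *		exit(1);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */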
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.llseek		= noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

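/*
 * Hardware virtualization support is enabled lazily: creating the first
 * VM bumps kvm_usage_count and turns virtualization on across all CPUs;
 * destroying the last VM turns it off again.  The CPU hotplug, reboot,
 * and suspend/resume hooks below keep newly onlined, rebooting, or
 * resuming CPUs consistent with that state.
 */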
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_STARTING:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		hardware_enable(NULL);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting) {
		/* spin while reset goes on */
		local_irq_enable();
		while (true)
			;
	}
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

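/*
 * Bus updates follow a copy/publish scheme: build a new kvm_io_bus,
 * publish it with rcu_assign_pointer(), wait for readers of the old copy
 * with synchronize_srcu_expedited(), then free the old bus.  The readers
 * (kvm_io_bus_write/read above) run under kvm->srcu, so after the
 * synchronize no reader can still hold the stale pointer.
 */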
/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name    = "kvm",
	.suspend = kvm_suspend,
	.resume  = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id  = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

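/*
 * kvm_init() below is called from the arch module's init routine.
 * Roughly, on x86/VMX (illustrative sketch; vmx_x86_ops and struct
 * vcpu_vmx are the kvm-intel names, not defined in this file):
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 */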
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	/* fault_page was allocated in kvm_init() and must be freed too */
	__free_page(fault_page);
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);