1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include "iodev.h" 20 21 #include <linux/kvm_host.h> 22 #include <linux/kvm.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/percpu.h> 26 #include <linux/mm.h> 27 #include <linux/miscdevice.h> 28 #include <linux/vmalloc.h> 29 #include <linux/reboot.h> 30 #include <linux/debugfs.h> 31 #include <linux/highmem.h> 32 #include <linux/file.h> 33 #include <linux/syscore_ops.h> 34 #include <linux/cpu.h> 35 #include <linux/sched.h> 36 #include <linux/cpumask.h> 37 #include <linux/smp.h> 38 #include <linux/anon_inodes.h> 39 #include <linux/profile.h> 40 #include <linux/kvm_para.h> 41 #include <linux/pagemap.h> 42 #include <linux/mman.h> 43 #include <linux/swap.h> 44 #include <linux/bitops.h> 45 #include <linux/spinlock.h> 46 #include <linux/compat.h> 47 #include <linux/srcu.h> 48 #include <linux/hugetlb.h> 49 #include <linux/slab.h> 50 51 #include <asm/processor.h> 52 #include <asm/io.h> 53 #include <asm/uaccess.h> 54 #include <asm/pgtable.h> 55 56 #include "coalesced_mmio.h" 57 #include "async_pf.h" 58 59 #define CREATE_TRACE_POINTS 60 #include <trace/events/kvm.h> 61 62 MODULE_AUTHOR("Qumranet"); 63 MODULE_LICENSE("GPL"); 64 65 /* 66 * Ordering of locks: 67 * 68 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 69 */ 70 71 DEFINE_RAW_SPINLOCK(kvm_lock); 72 LIST_HEAD(vm_list); 73 74 static cpumask_var_t cpus_hardware_enabled; 75 static int kvm_usage_count = 0; 76 static atomic_t hardware_enable_failed; 77 78 struct kmem_cache *kvm_vcpu_cache; 79 EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 80 81 static __read_mostly struct preempt_ops kvm_preempt_ops; 82 83 struct dentry *kvm_debugfs_dir; 84 85 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 86 unsigned long arg); 87 static int hardware_enable_all(void); 88 static void hardware_disable_all(void); 89 90 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 91 92 bool kvm_rebooting; 93 EXPORT_SYMBOL_GPL(kvm_rebooting); 94 95 static bool largepages_enabled = true; 96 97 static struct page *hwpoison_page; 98 static pfn_t hwpoison_pfn; 99 100 static struct page *fault_page; 101 static pfn_t fault_pfn; 102 103 inline int kvm_is_mmio_pfn(pfn_t pfn) 104 { 105 if (pfn_valid(pfn)) { 106 int reserved; 107 struct page *tail = pfn_to_page(pfn); 108 struct page *head = compound_trans_head(tail); 109 reserved = PageReserved(head); 110 if (head != tail) { 111 /* 112 * "head" is not a dangling pointer 113 * (compound_trans_head takes care of that) 114 * but the hugepage may have been splitted 115 * from under us (and we may not hold a 116 * reference count on the head page so it can 117 * be reused before we run PageReferenced), so 118 * we've to check PageTail before returning 119 * what we just read. 
120 */ 121 smp_rmb(); 122 if (PageTail(tail)) 123 return reserved; 124 } 125 return PageReserved(tail); 126 } 127 128 return true; 129 } 130 131 /* 132 * Switches to specified vcpu, until a matching vcpu_put() 133 */ 134 void vcpu_load(struct kvm_vcpu *vcpu) 135 { 136 int cpu; 137 138 mutex_lock(&vcpu->mutex); 139 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 140 /* The thread running this VCPU changed. */ 141 struct pid *oldpid = vcpu->pid; 142 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 143 rcu_assign_pointer(vcpu->pid, newpid); 144 synchronize_rcu(); 145 put_pid(oldpid); 146 } 147 cpu = get_cpu(); 148 preempt_notifier_register(&vcpu->preempt_notifier); 149 kvm_arch_vcpu_load(vcpu, cpu); 150 put_cpu(); 151 } 152 153 void vcpu_put(struct kvm_vcpu *vcpu) 154 { 155 preempt_disable(); 156 kvm_arch_vcpu_put(vcpu); 157 preempt_notifier_unregister(&vcpu->preempt_notifier); 158 preempt_enable(); 159 mutex_unlock(&vcpu->mutex); 160 } 161 162 static void ack_flush(void *_completed) 163 { 164 } 165 166 static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) 167 { 168 int i, cpu, me; 169 cpumask_var_t cpus; 170 bool called = true; 171 struct kvm_vcpu *vcpu; 172 173 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 174 175 me = get_cpu(); 176 kvm_for_each_vcpu(i, vcpu, kvm) { 177 kvm_make_request(req, vcpu); 178 cpu = vcpu->cpu; 179 180 /* Set ->requests bit before we read ->mode */ 181 smp_mb(); 182 183 if (cpus != NULL && cpu != -1 && cpu != me && 184 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 185 cpumask_set_cpu(cpu, cpus); 186 } 187 if (unlikely(cpus == NULL)) 188 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 189 else if (!cpumask_empty(cpus)) 190 smp_call_function_many(cpus, ack_flush, NULL, 1); 191 else 192 called = false; 193 put_cpu(); 194 free_cpumask_var(cpus); 195 return called; 196 } 197 198 void kvm_flush_remote_tlbs(struct kvm *kvm) 199 { 200 int dirty_count = kvm->tlbs_dirty; 201 202 smp_mb(); 203 if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 204 ++kvm->stat.remote_tlb_flush; 205 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 206 } 207 208 void kvm_reload_remote_mmus(struct kvm *kvm) 209 { 210 make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 211 } 212 213 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 214 { 215 struct page *page; 216 int r; 217 218 mutex_init(&vcpu->mutex); 219 vcpu->cpu = -1; 220 vcpu->kvm = kvm; 221 vcpu->vcpu_id = id; 222 vcpu->pid = NULL; 223 init_waitqueue_head(&vcpu->wq); 224 kvm_async_pf_vcpu_init(vcpu); 225 226 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 227 if (!page) { 228 r = -ENOMEM; 229 goto fail; 230 } 231 vcpu->run = page_address(page); 232 233 r = kvm_arch_vcpu_init(vcpu); 234 if (r < 0) 235 goto fail_free_run; 236 return 0; 237 238 fail_free_run: 239 free_page((unsigned long)vcpu->run); 240 fail: 241 return r; 242 } 243 EXPORT_SYMBOL_GPL(kvm_vcpu_init); 244 245 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 246 { 247 put_pid(vcpu->pid); 248 kvm_arch_vcpu_uninit(vcpu); 249 free_page((unsigned long)vcpu->run); 250 } 251 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 252 253 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 254 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 255 { 256 return container_of(mn, struct kvm, mmu_notifier); 257 } 258 259 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 260 struct mm_struct *mm, 261 unsigned long address) 262 { 263 struct kvm *kvm = mmu_notifier_to_kvm(mn); 264 int 
need_tlb_flush, idx;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed. If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        kvm_set_spte_hva(kvm, address, pte);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        need_tlb_flush |= kvm->tlbs_dirty;
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease but both values are read by the kvm
         * page fault under mmu_lock spinlock so we don't need to add
         * a smp_wmb() here in between the two.
359 */ 360 kvm->mmu_notifier_count--; 361 spin_unlock(&kvm->mmu_lock); 362 363 BUG_ON(kvm->mmu_notifier_count < 0); 364 } 365 366 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 367 struct mm_struct *mm, 368 unsigned long address) 369 { 370 struct kvm *kvm = mmu_notifier_to_kvm(mn); 371 int young, idx; 372 373 idx = srcu_read_lock(&kvm->srcu); 374 spin_lock(&kvm->mmu_lock); 375 young = kvm_age_hva(kvm, address); 376 spin_unlock(&kvm->mmu_lock); 377 srcu_read_unlock(&kvm->srcu, idx); 378 379 if (young) 380 kvm_flush_remote_tlbs(kvm); 381 382 return young; 383 } 384 385 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 386 struct mm_struct *mm, 387 unsigned long address) 388 { 389 struct kvm *kvm = mmu_notifier_to_kvm(mn); 390 int young, idx; 391 392 idx = srcu_read_lock(&kvm->srcu); 393 spin_lock(&kvm->mmu_lock); 394 young = kvm_test_age_hva(kvm, address); 395 spin_unlock(&kvm->mmu_lock); 396 srcu_read_unlock(&kvm->srcu, idx); 397 398 return young; 399 } 400 401 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 402 struct mm_struct *mm) 403 { 404 struct kvm *kvm = mmu_notifier_to_kvm(mn); 405 int idx; 406 407 idx = srcu_read_lock(&kvm->srcu); 408 kvm_arch_flush_shadow(kvm); 409 srcu_read_unlock(&kvm->srcu, idx); 410 } 411 412 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 413 .invalidate_page = kvm_mmu_notifier_invalidate_page, 414 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 415 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 416 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 417 .test_young = kvm_mmu_notifier_test_young, 418 .change_pte = kvm_mmu_notifier_change_pte, 419 .release = kvm_mmu_notifier_release, 420 }; 421 422 static int kvm_init_mmu_notifier(struct kvm *kvm) 423 { 424 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 425 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 426 } 427 428 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 429 430 static int kvm_init_mmu_notifier(struct kvm *kvm) 431 { 432 return 0; 433 } 434 435 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 436 437 static struct kvm *kvm_create_vm(void) 438 { 439 int r, i; 440 struct kvm *kvm = kvm_arch_alloc_vm(); 441 442 if (!kvm) 443 return ERR_PTR(-ENOMEM); 444 445 r = kvm_arch_init_vm(kvm); 446 if (r) 447 goto out_err_nodisable; 448 449 r = hardware_enable_all(); 450 if (r) 451 goto out_err_nodisable; 452 453 #ifdef CONFIG_HAVE_KVM_IRQCHIP 454 INIT_HLIST_HEAD(&kvm->mask_notifier_list); 455 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 456 #endif 457 458 r = -ENOMEM; 459 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 460 if (!kvm->memslots) 461 goto out_err_nosrcu; 462 if (init_srcu_struct(&kvm->srcu)) 463 goto out_err_nosrcu; 464 for (i = 0; i < KVM_NR_BUSES; i++) { 465 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), 466 GFP_KERNEL); 467 if (!kvm->buses[i]) 468 goto out_err; 469 } 470 471 r = kvm_init_mmu_notifier(kvm); 472 if (r) 473 goto out_err; 474 475 kvm->mm = current->mm; 476 atomic_inc(&kvm->mm->mm_count); 477 spin_lock_init(&kvm->mmu_lock); 478 kvm_eventfd_init(kvm); 479 mutex_init(&kvm->lock); 480 mutex_init(&kvm->irq_lock); 481 mutex_init(&kvm->slots_lock); 482 atomic_set(&kvm->users_count, 1); 483 raw_spin_lock(&kvm_lock); 484 list_add(&kvm->vm_list, &vm_list); 485 raw_spin_unlock(&kvm_lock); 486 487 return kvm; 488 489 out_err: 490 cleanup_srcu_struct(&kvm->srcu); 491 out_err_nosrcu: 492 hardware_disable_all(); 493 
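        /*
         * kfree(NULL) is a no-op, so buses and memslots that were never
         * allocated are safe to free in the common exit path below.
         */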
out_err_nodisable: 494 for (i = 0; i < KVM_NR_BUSES; i++) 495 kfree(kvm->buses[i]); 496 kfree(kvm->memslots); 497 kvm_arch_free_vm(kvm); 498 return ERR_PTR(r); 499 } 500 501 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 502 { 503 if (!memslot->dirty_bitmap) 504 return; 505 506 if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE) 507 vfree(memslot->dirty_bitmap_head); 508 else 509 kfree(memslot->dirty_bitmap_head); 510 511 memslot->dirty_bitmap = NULL; 512 memslot->dirty_bitmap_head = NULL; 513 } 514 515 /* 516 * Free any memory in @free but not in @dont. 517 */ 518 static void kvm_free_physmem_slot(struct kvm_memory_slot *free, 519 struct kvm_memory_slot *dont) 520 { 521 int i; 522 523 if (!dont || free->rmap != dont->rmap) 524 vfree(free->rmap); 525 526 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 527 kvm_destroy_dirty_bitmap(free); 528 529 530 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { 531 if (!dont || free->lpage_info[i] != dont->lpage_info[i]) { 532 vfree(free->lpage_info[i]); 533 free->lpage_info[i] = NULL; 534 } 535 } 536 537 free->npages = 0; 538 free->rmap = NULL; 539 } 540 541 void kvm_free_physmem(struct kvm *kvm) 542 { 543 int i; 544 struct kvm_memslots *slots = kvm->memslots; 545 546 for (i = 0; i < slots->nmemslots; ++i) 547 kvm_free_physmem_slot(&slots->memslots[i], NULL); 548 549 kfree(kvm->memslots); 550 } 551 552 static void kvm_destroy_vm(struct kvm *kvm) 553 { 554 int i; 555 struct mm_struct *mm = kvm->mm; 556 557 kvm_arch_sync_events(kvm); 558 raw_spin_lock(&kvm_lock); 559 list_del(&kvm->vm_list); 560 raw_spin_unlock(&kvm_lock); 561 kvm_free_irq_routing(kvm); 562 for (i = 0; i < KVM_NR_BUSES; i++) 563 kvm_io_bus_destroy(kvm->buses[i]); 564 kvm_coalesced_mmio_free(kvm); 565 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 566 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 567 #else 568 kvm_arch_flush_shadow(kvm); 569 #endif 570 kvm_arch_destroy_vm(kvm); 571 kvm_free_physmem(kvm); 572 cleanup_srcu_struct(&kvm->srcu); 573 kvm_arch_free_vm(kvm); 574 hardware_disable_all(); 575 mmdrop(mm); 576 } 577 578 void kvm_get_kvm(struct kvm *kvm) 579 { 580 atomic_inc(&kvm->users_count); 581 } 582 EXPORT_SYMBOL_GPL(kvm_get_kvm); 583 584 void kvm_put_kvm(struct kvm *kvm) 585 { 586 if (atomic_dec_and_test(&kvm->users_count)) 587 kvm_destroy_vm(kvm); 588 } 589 EXPORT_SYMBOL_GPL(kvm_put_kvm); 590 591 592 static int kvm_vm_release(struct inode *inode, struct file *filp) 593 { 594 struct kvm *kvm = filp->private_data; 595 596 kvm_irqfd_release(kvm); 597 598 kvm_put_kvm(kvm); 599 return 0; 600 } 601 602 #ifndef CONFIG_S390 603 /* 604 * Allocation size is twice as large as the actual dirty bitmap size. 605 * This makes it possible to do double buffering: see x86's 606 * kvm_vm_ioctl_get_dirty_log(). 607 */ 608 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 609 { 610 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 611 612 if (dirty_bytes > PAGE_SIZE) 613 memslot->dirty_bitmap = vzalloc(dirty_bytes); 614 else 615 memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL); 616 617 if (!memslot->dirty_bitmap) 618 return -ENOMEM; 619 620 memslot->dirty_bitmap_head = memslot->dirty_bitmap; 621 return 0; 622 } 623 #endif /* !CONFIG_S390 */ 624 625 /* 626 * Allocate some memory and give it an address in the guest physical address 627 * space. 628 * 629 * Discontiguous memory is allowed, mostly for framebuffers. 630 * 631 * Must be called holding mmap_sem for write. 
632 */ 633 int __kvm_set_memory_region(struct kvm *kvm, 634 struct kvm_userspace_memory_region *mem, 635 int user_alloc) 636 { 637 int r; 638 gfn_t base_gfn; 639 unsigned long npages; 640 unsigned long i; 641 struct kvm_memory_slot *memslot; 642 struct kvm_memory_slot old, new; 643 struct kvm_memslots *slots, *old_memslots; 644 645 r = -EINVAL; 646 /* General sanity checks */ 647 if (mem->memory_size & (PAGE_SIZE - 1)) 648 goto out; 649 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 650 goto out; 651 if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1))) 652 goto out; 653 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) 654 goto out; 655 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 656 goto out; 657 658 memslot = &kvm->memslots->memslots[mem->slot]; 659 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 660 npages = mem->memory_size >> PAGE_SHIFT; 661 662 r = -EINVAL; 663 if (npages > KVM_MEM_MAX_NR_PAGES) 664 goto out; 665 666 if (!npages) 667 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; 668 669 new = old = *memslot; 670 671 new.id = mem->slot; 672 new.base_gfn = base_gfn; 673 new.npages = npages; 674 new.flags = mem->flags; 675 676 /* Disallow changing a memory slot's size. */ 677 r = -EINVAL; 678 if (npages && old.npages && npages != old.npages) 679 goto out_free; 680 681 /* Check for overlaps */ 682 r = -EEXIST; 683 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { 684 struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; 685 686 if (s == memslot || !s->npages) 687 continue; 688 if (!((base_gfn + npages <= s->base_gfn) || 689 (base_gfn >= s->base_gfn + s->npages))) 690 goto out_free; 691 } 692 693 /* Free page dirty bitmap if unneeded */ 694 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 695 new.dirty_bitmap = NULL; 696 697 r = -ENOMEM; 698 699 /* Allocate if a slot is being created */ 700 #ifndef CONFIG_S390 701 if (npages && !new.rmap) { 702 new.rmap = vzalloc(npages * sizeof(*new.rmap)); 703 704 if (!new.rmap) 705 goto out_free; 706 707 new.user_alloc = user_alloc; 708 new.userspace_addr = mem->userspace_addr; 709 } 710 if (!npages) 711 goto skip_lpage; 712 713 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { 714 unsigned long ugfn; 715 unsigned long j; 716 int lpages; 717 int level = i + 2; 718 719 /* Avoid unused variable warning if no large pages */ 720 (void)level; 721 722 if (new.lpage_info[i]) 723 continue; 724 725 lpages = 1 + ((base_gfn + npages - 1) 726 >> KVM_HPAGE_GFN_SHIFT(level)); 727 lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level); 728 729 new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i])); 730 731 if (!new.lpage_info[i]) 732 goto out_free; 733 734 if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 735 new.lpage_info[i][0].write_count = 1; 736 if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 737 new.lpage_info[i][lpages - 1].write_count = 1; 738 ugfn = new.userspace_addr >> PAGE_SHIFT; 739 /* 740 * If the gfn and userspace address are not aligned wrt each 741 * other, or if explicitly asked to, disable large page 742 * support for this slot 743 */ 744 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || 745 !largepages_enabled) 746 for (j = 0; j < lpages; ++j) 747 new.lpage_info[i][j].write_count = 1; 748 } 749 750 skip_lpage: 751 752 /* Allocate page dirty bitmap if needed */ 753 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 754 if (kvm_create_dirty_bitmap(&new) < 0) 755 goto out_free; 756 /* destroy any largepage mappings for dirty tracking */ 757 } 758 #else /* not defined CONFIG_S390 */ 759 
new.user_alloc = user_alloc; 760 if (user_alloc) 761 new.userspace_addr = mem->userspace_addr; 762 #endif /* not defined CONFIG_S390 */ 763 764 if (!npages) { 765 r = -ENOMEM; 766 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 767 if (!slots) 768 goto out_free; 769 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); 770 if (mem->slot >= slots->nmemslots) 771 slots->nmemslots = mem->slot + 1; 772 slots->generation++; 773 slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID; 774 775 old_memslots = kvm->memslots; 776 rcu_assign_pointer(kvm->memslots, slots); 777 synchronize_srcu_expedited(&kvm->srcu); 778 /* From this point no new shadow pages pointing to a deleted 779 * memslot will be created. 780 * 781 * validation of sp->gfn happens in: 782 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 783 * - kvm_is_visible_gfn (mmu_check_roots) 784 */ 785 kvm_arch_flush_shadow(kvm); 786 kfree(old_memslots); 787 } 788 789 r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); 790 if (r) 791 goto out_free; 792 793 /* map the pages in iommu page table */ 794 if (npages) { 795 r = kvm_iommu_map_pages(kvm, &new); 796 if (r) 797 goto out_free; 798 } 799 800 r = -ENOMEM; 801 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 802 if (!slots) 803 goto out_free; 804 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); 805 if (mem->slot >= slots->nmemslots) 806 slots->nmemslots = mem->slot + 1; 807 slots->generation++; 808 809 /* actual memory is freed via old in kvm_free_physmem_slot below */ 810 if (!npages) { 811 new.rmap = NULL; 812 new.dirty_bitmap = NULL; 813 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) 814 new.lpage_info[i] = NULL; 815 } 816 817 slots->memslots[mem->slot] = new; 818 old_memslots = kvm->memslots; 819 rcu_assign_pointer(kvm->memslots, slots); 820 synchronize_srcu_expedited(&kvm->srcu); 821 822 kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); 823 824 kvm_free_physmem_slot(&old, &new); 825 kfree(old_memslots); 826 827 return 0; 828 829 out_free: 830 kvm_free_physmem_slot(&new, &old); 831 out: 832 return r; 833 834 } 835 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 836 837 int kvm_set_memory_region(struct kvm *kvm, 838 struct kvm_userspace_memory_region *mem, 839 int user_alloc) 840 { 841 int r; 842 843 mutex_lock(&kvm->slots_lock); 844 r = __kvm_set_memory_region(kvm, mem, user_alloc); 845 mutex_unlock(&kvm->slots_lock); 846 return r; 847 } 848 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 849 850 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 851 struct 852 kvm_userspace_memory_region *mem, 853 int user_alloc) 854 { 855 if (mem->slot >= KVM_MEMORY_SLOTS) 856 return -EINVAL; 857 return kvm_set_memory_region(kvm, mem, user_alloc); 858 } 859 860 int kvm_get_dirty_log(struct kvm *kvm, 861 struct kvm_dirty_log *log, int *is_dirty) 862 { 863 struct kvm_memory_slot *memslot; 864 int r, i; 865 unsigned long n; 866 unsigned long any = 0; 867 868 r = -EINVAL; 869 if (log->slot >= KVM_MEMORY_SLOTS) 870 goto out; 871 872 memslot = &kvm->memslots->memslots[log->slot]; 873 r = -ENOENT; 874 if (!memslot->dirty_bitmap) 875 goto out; 876 877 n = kvm_dirty_bitmap_bytes(memslot); 878 879 for (i = 0; !any && i < n/sizeof(long); ++i) 880 any = memslot->dirty_bitmap[i]; 881 882 r = -EFAULT; 883 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 884 goto out; 885 886 if (any) 887 *is_dirty = 1; 888 889 r = 0; 890 out: 891 return r; 892 } 893 894 void kvm_disable_largepages(void) 895 { 896 largepages_enabled = false; 897 } 898 
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
        return page == bad_page || page == hwpoison_page || page == fault_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

int is_hwpoison_pfn(pfn_t pfn)
{
        return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

int is_fault_pfn(pfn_t pfn)
{
        return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_fault_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
                                                gfn_t gfn)
{
        int i;

        for (i = 0; i < slots->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_memslots *slots = kvm_memslots(kvm);

        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];

                if (memslot->flags & KVM_MEMSLOT_INVALID)
                        continue;

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
        struct vm_area_struct *vma;
        unsigned long addr, size;

        size = PAGE_SIZE;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return PAGE_SIZE;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, addr);
        if (!vma)
                goto out;

        size = vma_kernel_pagesize(vma);

out:
        up_read(&current->mm->mmap_sem);

        return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = NULL;

        for (i = 0; i < slots->nmemslots; ++i) {
                memslot = &slots->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        break;
        }

        return memslot - slots->memslots;
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
                                     gfn_t *nr_pages)
{
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
                return bad_hva();

        if (nr_pages)
                *nr_pages = slot->npages - (gfn - slot->base_gfn);

        return gfn_to_hva_memslot(slot, gfn);
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t get_fault_pfn(void)
{
        get_page(fault_page);
        return fault_pfn;
}

int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
        unsigned long start, int write, struct page **page)
{
        int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

        if (write)
                flags |= FOLL_WRITE;

        return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
        int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

        rc = __get_user_pages(current, current->mm, addr, 1,
                              flags, NULL, NULL, NULL);
        return rc == -EHWPOISON;
}

/*
 * Translate a host virtual address to a host pfn.  @atomic forbids sleeping,
 * a non-NULL @async selects the "don't wait for the page" path used by async
 * page faults, and *@writable (when non-NULL) reports whether the mapping
 * that was finally established is writable.
 */
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
                        bool *async, bool write_fault, bool *writable)
{
        struct page *page[1];
        int npages = 0;
        pfn_t pfn;

        /* we can do it either atomically or asynchronously, not both */
        BUG_ON(atomic && async);

        BUG_ON(!write_fault && !writable);

        if (writable)
                *writable = true;

        if (atomic || async)
                npages = __get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1) && !atomic) {
                might_sleep();

                if (writable)
                        *writable = write_fault;

                if (async) {
                        down_read(&current->mm->mmap_sem);
                        npages = get_user_page_nowait(current, current->mm,
                                                      addr, write_fault, page);
                        up_read(&current->mm->mmap_sem);
                } else
                        npages = get_user_pages_fast(addr, 1, write_fault,
                                                     page);

                /* map read fault as writable if possible */
                if (unlikely(!write_fault) && npages == 1) {
                        struct page *wpage[1];

                        npages = __get_user_pages_fast(addr, 1, 1, wpage);
                        if (npages == 1) {
                                *writable = true;
                                put_page(page[0]);
                                page[0] = wpage[0];
                        }
                        npages = 1;
                }
        }

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                if (atomic)
                        return get_fault_pfn();

                down_read(&current->mm->mmap_sem);
                if (npages == -EHWPOISON ||
                        (!async && check_user_page_hwpoison(addr))) {
                        up_read(&current->mm->mmap_sem);
                        get_page(hwpoison_page);
                        return page_to_pfn(hwpoison_page);
                }

                vma = find_vma_intersection(current->mm, addr, addr+1);

                if (vma == NULL)
                        pfn = get_fault_pfn();
                else if ((vma->vm_flags & VM_PFNMAP)) {
                        pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                                vma->vm_pgoff;
                        BUG_ON(!kvm_is_mmio_pfn(pfn));
                } else {
                        if (async && (vma->vm_flags & VM_WRITE))
                                *async = true;
                        pfn = get_fault_pfn();
                }
                up_read(&current->mm->mmap_sem);
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
        return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
                          bool write_fault, bool *writable)
{
        unsigned long addr;

        if (async)
                *async = false;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
        return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable)
{
        return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
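        /*
         * Neither atomic nor async; request a write fault and don't ask
         * whether the mapping ended up writable: the common "just give me
         * the pfn for this gfn" case.
         */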
return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); 1180 } 1181 EXPORT_SYMBOL_GPL(gfn_to_pfn); 1182 1183 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 1184 bool *writable) 1185 { 1186 return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); 1187 } 1188 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 1189 1190 pfn_t gfn_to_pfn_memslot(struct kvm *kvm, 1191 struct kvm_memory_slot *slot, gfn_t gfn) 1192 { 1193 unsigned long addr = gfn_to_hva_memslot(slot, gfn); 1194 return hva_to_pfn(kvm, addr, false, NULL, true, NULL); 1195 } 1196 1197 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, 1198 int nr_pages) 1199 { 1200 unsigned long addr; 1201 gfn_t entry; 1202 1203 addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry); 1204 if (kvm_is_error_hva(addr)) 1205 return -1; 1206 1207 if (entry < nr_pages) 1208 return 0; 1209 1210 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1211 } 1212 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1213 1214 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1215 { 1216 pfn_t pfn; 1217 1218 pfn = gfn_to_pfn(kvm, gfn); 1219 if (!kvm_is_mmio_pfn(pfn)) 1220 return pfn_to_page(pfn); 1221 1222 WARN_ON(kvm_is_mmio_pfn(pfn)); 1223 1224 get_page(bad_page); 1225 return bad_page; 1226 } 1227 1228 EXPORT_SYMBOL_GPL(gfn_to_page); 1229 1230 void kvm_release_page_clean(struct page *page) 1231 { 1232 kvm_release_pfn_clean(page_to_pfn(page)); 1233 } 1234 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1235 1236 void kvm_release_pfn_clean(pfn_t pfn) 1237 { 1238 if (!kvm_is_mmio_pfn(pfn)) 1239 put_page(pfn_to_page(pfn)); 1240 } 1241 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1242 1243 void kvm_release_page_dirty(struct page *page) 1244 { 1245 kvm_release_pfn_dirty(page_to_pfn(page)); 1246 } 1247 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1248 1249 void kvm_release_pfn_dirty(pfn_t pfn) 1250 { 1251 kvm_set_pfn_dirty(pfn); 1252 kvm_release_pfn_clean(pfn); 1253 } 1254 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 1255 1256 void kvm_set_page_dirty(struct page *page) 1257 { 1258 kvm_set_pfn_dirty(page_to_pfn(page)); 1259 } 1260 EXPORT_SYMBOL_GPL(kvm_set_page_dirty); 1261 1262 void kvm_set_pfn_dirty(pfn_t pfn) 1263 { 1264 if (!kvm_is_mmio_pfn(pfn)) { 1265 struct page *page = pfn_to_page(pfn); 1266 if (!PageReserved(page)) 1267 SetPageDirty(page); 1268 } 1269 } 1270 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1271 1272 void kvm_set_pfn_accessed(pfn_t pfn) 1273 { 1274 if (!kvm_is_mmio_pfn(pfn)) 1275 mark_page_accessed(pfn_to_page(pfn)); 1276 } 1277 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1278 1279 void kvm_get_pfn(pfn_t pfn) 1280 { 1281 if (!kvm_is_mmio_pfn(pfn)) 1282 get_page(pfn_to_page(pfn)); 1283 } 1284 EXPORT_SYMBOL_GPL(kvm_get_pfn); 1285 1286 static int next_segment(unsigned long len, int offset) 1287 { 1288 if (len > PAGE_SIZE - offset) 1289 return PAGE_SIZE - offset; 1290 else 1291 return len; 1292 } 1293 1294 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1295 int len) 1296 { 1297 int r; 1298 unsigned long addr; 1299 1300 addr = gfn_to_hva(kvm, gfn); 1301 if (kvm_is_error_hva(addr)) 1302 return -EFAULT; 1303 r = copy_from_user(data, (void __user *)addr + offset, len); 1304 if (r) 1305 return -EFAULT; 1306 return 0; 1307 } 1308 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1309 1310 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1311 { 1312 gfn_t gfn = gpa >> PAGE_SHIFT; 1313 int seg; 1314 int offset = offset_in_page(gpa); 1315 int ret; 1316 1317 while ((seg = next_segment(len, 
offset)) != 0) { 1318 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1319 if (ret < 0) 1320 return ret; 1321 offset = 0; 1322 len -= seg; 1323 data += seg; 1324 ++gfn; 1325 } 1326 return 0; 1327 } 1328 EXPORT_SYMBOL_GPL(kvm_read_guest); 1329 1330 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1331 unsigned long len) 1332 { 1333 int r; 1334 unsigned long addr; 1335 gfn_t gfn = gpa >> PAGE_SHIFT; 1336 int offset = offset_in_page(gpa); 1337 1338 addr = gfn_to_hva(kvm, gfn); 1339 if (kvm_is_error_hva(addr)) 1340 return -EFAULT; 1341 pagefault_disable(); 1342 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1343 pagefault_enable(); 1344 if (r) 1345 return -EFAULT; 1346 return 0; 1347 } 1348 EXPORT_SYMBOL(kvm_read_guest_atomic); 1349 1350 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, 1351 int offset, int len) 1352 { 1353 int r; 1354 unsigned long addr; 1355 1356 addr = gfn_to_hva(kvm, gfn); 1357 if (kvm_is_error_hva(addr)) 1358 return -EFAULT; 1359 r = copy_to_user((void __user *)addr + offset, data, len); 1360 if (r) 1361 return -EFAULT; 1362 mark_page_dirty(kvm, gfn); 1363 return 0; 1364 } 1365 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1366 1367 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1368 unsigned long len) 1369 { 1370 gfn_t gfn = gpa >> PAGE_SHIFT; 1371 int seg; 1372 int offset = offset_in_page(gpa); 1373 int ret; 1374 1375 while ((seg = next_segment(len, offset)) != 0) { 1376 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1377 if (ret < 0) 1378 return ret; 1379 offset = 0; 1380 len -= seg; 1381 data += seg; 1382 ++gfn; 1383 } 1384 return 0; 1385 } 1386 1387 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1388 gpa_t gpa) 1389 { 1390 struct kvm_memslots *slots = kvm_memslots(kvm); 1391 int offset = offset_in_page(gpa); 1392 gfn_t gfn = gpa >> PAGE_SHIFT; 1393 1394 ghc->gpa = gpa; 1395 ghc->generation = slots->generation; 1396 ghc->memslot = __gfn_to_memslot(slots, gfn); 1397 ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); 1398 if (!kvm_is_error_hva(ghc->hva)) 1399 ghc->hva += offset; 1400 else 1401 return -EFAULT; 1402 1403 return 0; 1404 } 1405 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1406 1407 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1408 void *data, unsigned long len) 1409 { 1410 struct kvm_memslots *slots = kvm_memslots(kvm); 1411 int r; 1412 1413 if (slots->generation != ghc->generation) 1414 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); 1415 1416 if (kvm_is_error_hva(ghc->hva)) 1417 return -EFAULT; 1418 1419 r = copy_to_user((void __user *)ghc->hva, data, len); 1420 if (r) 1421 return -EFAULT; 1422 mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1423 1424 return 0; 1425 } 1426 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1427 1428 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1429 { 1430 return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page, 1431 offset, len); 1432 } 1433 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1434 1435 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 1436 { 1437 gfn_t gfn = gpa >> PAGE_SHIFT; 1438 int seg; 1439 int offset = offset_in_page(gpa); 1440 int ret; 1441 1442 while ((seg = next_segment(len, offset)) != 0) { 1443 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 1444 if (ret < 0) 1445 return ret; 1446 offset = 0; 1447 len -= seg; 1448 ++gfn; 1449 } 1450 return 0; 1451 } 1452 
EXPORT_SYMBOL_GPL(kvm_clear_guest); 1453 1454 void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, 1455 gfn_t gfn) 1456 { 1457 if (memslot && memslot->dirty_bitmap) { 1458 unsigned long rel_gfn = gfn - memslot->base_gfn; 1459 1460 __set_bit_le(rel_gfn, memslot->dirty_bitmap); 1461 } 1462 } 1463 1464 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1465 { 1466 struct kvm_memory_slot *memslot; 1467 1468 memslot = gfn_to_memslot(kvm, gfn); 1469 mark_page_dirty_in_slot(kvm, memslot, gfn); 1470 } 1471 1472 /* 1473 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 1474 */ 1475 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 1476 { 1477 DEFINE_WAIT(wait); 1478 1479 for (;;) { 1480 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 1481 1482 if (kvm_arch_vcpu_runnable(vcpu)) { 1483 kvm_make_request(KVM_REQ_UNHALT, vcpu); 1484 break; 1485 } 1486 if (kvm_cpu_has_pending_timer(vcpu)) 1487 break; 1488 if (signal_pending(current)) 1489 break; 1490 1491 schedule(); 1492 } 1493 1494 finish_wait(&vcpu->wq, &wait); 1495 } 1496 1497 void kvm_resched(struct kvm_vcpu *vcpu) 1498 { 1499 if (!need_resched()) 1500 return; 1501 cond_resched(); 1502 } 1503 EXPORT_SYMBOL_GPL(kvm_resched); 1504 1505 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 1506 { 1507 struct kvm *kvm = me->kvm; 1508 struct kvm_vcpu *vcpu; 1509 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 1510 int yielded = 0; 1511 int pass; 1512 int i; 1513 1514 /* 1515 * We boost the priority of a VCPU that is runnable but not 1516 * currently running, because it got preempted by something 1517 * else and called schedule in __vcpu_run. Hopefully that 1518 * VCPU is holding the lock that we need and will release it. 1519 * We approximate round-robin by starting at the last boosted VCPU. 
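         * The second pass over the vcpu array picks up the vcpus whose index
         * is below the last boosted vcpu, which the first pass skipped.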
1520 */ 1521 for (pass = 0; pass < 2 && !yielded; pass++) { 1522 kvm_for_each_vcpu(i, vcpu, kvm) { 1523 struct task_struct *task = NULL; 1524 struct pid *pid; 1525 if (!pass && i < last_boosted_vcpu) { 1526 i = last_boosted_vcpu; 1527 continue; 1528 } else if (pass && i > last_boosted_vcpu) 1529 break; 1530 if (vcpu == me) 1531 continue; 1532 if (waitqueue_active(&vcpu->wq)) 1533 continue; 1534 rcu_read_lock(); 1535 pid = rcu_dereference(vcpu->pid); 1536 if (pid) 1537 task = get_pid_task(vcpu->pid, PIDTYPE_PID); 1538 rcu_read_unlock(); 1539 if (!task) 1540 continue; 1541 if (task->flags & PF_VCPU) { 1542 put_task_struct(task); 1543 continue; 1544 } 1545 if (yield_to(task, 1)) { 1546 put_task_struct(task); 1547 kvm->last_boosted_vcpu = i; 1548 yielded = 1; 1549 break; 1550 } 1551 put_task_struct(task); 1552 } 1553 } 1554 } 1555 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 1556 1557 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1558 { 1559 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 1560 struct page *page; 1561 1562 if (vmf->pgoff == 0) 1563 page = virt_to_page(vcpu->run); 1564 #ifdef CONFIG_X86 1565 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 1566 page = virt_to_page(vcpu->arch.pio_data); 1567 #endif 1568 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 1569 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 1570 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 1571 #endif 1572 else 1573 return VM_FAULT_SIGBUS; 1574 get_page(page); 1575 vmf->page = page; 1576 return 0; 1577 } 1578 1579 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 1580 .fault = kvm_vcpu_fault, 1581 }; 1582 1583 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 1584 { 1585 vma->vm_ops = &kvm_vcpu_vm_ops; 1586 return 0; 1587 } 1588 1589 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 1590 { 1591 struct kvm_vcpu *vcpu = filp->private_data; 1592 1593 kvm_put_kvm(vcpu->kvm); 1594 return 0; 1595 } 1596 1597 static struct file_operations kvm_vcpu_fops = { 1598 .release = kvm_vcpu_release, 1599 .unlocked_ioctl = kvm_vcpu_ioctl, 1600 .compat_ioctl = kvm_vcpu_ioctl, 1601 .mmap = kvm_vcpu_mmap, 1602 .llseek = noop_llseek, 1603 }; 1604 1605 /* 1606 * Allocates an inode for the vcpu. 1607 */ 1608 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 1609 { 1610 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR); 1611 } 1612 1613 /* 1614 * Creates some virtual cpus. Good luck creating more than one. 
1615 */ 1616 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 1617 { 1618 int r; 1619 struct kvm_vcpu *vcpu, *v; 1620 1621 vcpu = kvm_arch_vcpu_create(kvm, id); 1622 if (IS_ERR(vcpu)) 1623 return PTR_ERR(vcpu); 1624 1625 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 1626 1627 r = kvm_arch_vcpu_setup(vcpu); 1628 if (r) 1629 return r; 1630 1631 mutex_lock(&kvm->lock); 1632 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { 1633 r = -EINVAL; 1634 goto vcpu_destroy; 1635 } 1636 1637 kvm_for_each_vcpu(r, v, kvm) 1638 if (v->vcpu_id == id) { 1639 r = -EEXIST; 1640 goto vcpu_destroy; 1641 } 1642 1643 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 1644 1645 /* Now it's all set up, let userspace reach it */ 1646 kvm_get_kvm(kvm); 1647 r = create_vcpu_fd(vcpu); 1648 if (r < 0) { 1649 kvm_put_kvm(kvm); 1650 goto vcpu_destroy; 1651 } 1652 1653 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 1654 smp_wmb(); 1655 atomic_inc(&kvm->online_vcpus); 1656 1657 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 1658 if (kvm->bsp_vcpu_id == id) 1659 kvm->bsp_vcpu = vcpu; 1660 #endif 1661 mutex_unlock(&kvm->lock); 1662 return r; 1663 1664 vcpu_destroy: 1665 mutex_unlock(&kvm->lock); 1666 kvm_arch_vcpu_destroy(vcpu); 1667 return r; 1668 } 1669 1670 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 1671 { 1672 if (sigset) { 1673 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 1674 vcpu->sigset_active = 1; 1675 vcpu->sigset = *sigset; 1676 } else 1677 vcpu->sigset_active = 0; 1678 return 0; 1679 } 1680 1681 static long kvm_vcpu_ioctl(struct file *filp, 1682 unsigned int ioctl, unsigned long arg) 1683 { 1684 struct kvm_vcpu *vcpu = filp->private_data; 1685 void __user *argp = (void __user *)arg; 1686 int r; 1687 struct kvm_fpu *fpu = NULL; 1688 struct kvm_sregs *kvm_sregs = NULL; 1689 1690 if (vcpu->kvm->mm != current->mm) 1691 return -EIO; 1692 1693 #if defined(CONFIG_S390) || defined(CONFIG_PPC) 1694 /* 1695 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 1696 * so vcpu_load() would break it. 
1697 */ 1698 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT) 1699 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 1700 #endif 1701 1702 1703 vcpu_load(vcpu); 1704 switch (ioctl) { 1705 case KVM_RUN: 1706 r = -EINVAL; 1707 if (arg) 1708 goto out; 1709 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 1710 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 1711 break; 1712 case KVM_GET_REGS: { 1713 struct kvm_regs *kvm_regs; 1714 1715 r = -ENOMEM; 1716 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 1717 if (!kvm_regs) 1718 goto out; 1719 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 1720 if (r) 1721 goto out_free1; 1722 r = -EFAULT; 1723 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 1724 goto out_free1; 1725 r = 0; 1726 out_free1: 1727 kfree(kvm_regs); 1728 break; 1729 } 1730 case KVM_SET_REGS: { 1731 struct kvm_regs *kvm_regs; 1732 1733 r = -ENOMEM; 1734 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 1735 if (!kvm_regs) 1736 goto out; 1737 r = -EFAULT; 1738 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs))) 1739 goto out_free2; 1740 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 1741 if (r) 1742 goto out_free2; 1743 r = 0; 1744 out_free2: 1745 kfree(kvm_regs); 1746 break; 1747 } 1748 case KVM_GET_SREGS: { 1749 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 1750 r = -ENOMEM; 1751 if (!kvm_sregs) 1752 goto out; 1753 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 1754 if (r) 1755 goto out; 1756 r = -EFAULT; 1757 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 1758 goto out; 1759 r = 0; 1760 break; 1761 } 1762 case KVM_SET_SREGS: { 1763 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 1764 r = -ENOMEM; 1765 if (!kvm_sregs) 1766 goto out; 1767 r = -EFAULT; 1768 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs))) 1769 goto out; 1770 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 1771 if (r) 1772 goto out; 1773 r = 0; 1774 break; 1775 } 1776 case KVM_GET_MP_STATE: { 1777 struct kvm_mp_state mp_state; 1778 1779 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 1780 if (r) 1781 goto out; 1782 r = -EFAULT; 1783 if (copy_to_user(argp, &mp_state, sizeof mp_state)) 1784 goto out; 1785 r = 0; 1786 break; 1787 } 1788 case KVM_SET_MP_STATE: { 1789 struct kvm_mp_state mp_state; 1790 1791 r = -EFAULT; 1792 if (copy_from_user(&mp_state, argp, sizeof mp_state)) 1793 goto out; 1794 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 1795 if (r) 1796 goto out; 1797 r = 0; 1798 break; 1799 } 1800 case KVM_TRANSLATE: { 1801 struct kvm_translation tr; 1802 1803 r = -EFAULT; 1804 if (copy_from_user(&tr, argp, sizeof tr)) 1805 goto out; 1806 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 1807 if (r) 1808 goto out; 1809 r = -EFAULT; 1810 if (copy_to_user(argp, &tr, sizeof tr)) 1811 goto out; 1812 r = 0; 1813 break; 1814 } 1815 case KVM_SET_GUEST_DEBUG: { 1816 struct kvm_guest_debug dbg; 1817 1818 r = -EFAULT; 1819 if (copy_from_user(&dbg, argp, sizeof dbg)) 1820 goto out; 1821 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 1822 if (r) 1823 goto out; 1824 r = 0; 1825 break; 1826 } 1827 case KVM_SET_SIGNAL_MASK: { 1828 struct kvm_signal_mask __user *sigmask_arg = argp; 1829 struct kvm_signal_mask kvm_sigmask; 1830 sigset_t sigset, *p; 1831 1832 p = NULL; 1833 if (argp) { 1834 r = -EFAULT; 1835 if (copy_from_user(&kvm_sigmask, argp, 1836 sizeof kvm_sigmask)) 1837 goto out; 1838 r = -EINVAL; 1839 if (kvm_sigmask.len != sizeof sigset) 1840 goto out; 1841 r = -EFAULT; 1842 if (copy_from_user(&sigset, 
sigmask_arg->sigset, 1843 sizeof sigset)) 1844 goto out; 1845 p = &sigset; 1846 } 1847 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 1848 break; 1849 } 1850 case KVM_GET_FPU: { 1851 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 1852 r = -ENOMEM; 1853 if (!fpu) 1854 goto out; 1855 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 1856 if (r) 1857 goto out; 1858 r = -EFAULT; 1859 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 1860 goto out; 1861 r = 0; 1862 break; 1863 } 1864 case KVM_SET_FPU: { 1865 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 1866 r = -ENOMEM; 1867 if (!fpu) 1868 goto out; 1869 r = -EFAULT; 1870 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu))) 1871 goto out; 1872 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 1873 if (r) 1874 goto out; 1875 r = 0; 1876 break; 1877 } 1878 default: 1879 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 1880 } 1881 out: 1882 vcpu_put(vcpu); 1883 kfree(fpu); 1884 kfree(kvm_sregs); 1885 return r; 1886 } 1887 1888 static long kvm_vm_ioctl(struct file *filp, 1889 unsigned int ioctl, unsigned long arg) 1890 { 1891 struct kvm *kvm = filp->private_data; 1892 void __user *argp = (void __user *)arg; 1893 int r; 1894 1895 if (kvm->mm != current->mm) 1896 return -EIO; 1897 switch (ioctl) { 1898 case KVM_CREATE_VCPU: 1899 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 1900 if (r < 0) 1901 goto out; 1902 break; 1903 case KVM_SET_USER_MEMORY_REGION: { 1904 struct kvm_userspace_memory_region kvm_userspace_mem; 1905 1906 r = -EFAULT; 1907 if (copy_from_user(&kvm_userspace_mem, argp, 1908 sizeof kvm_userspace_mem)) 1909 goto out; 1910 1911 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1); 1912 if (r) 1913 goto out; 1914 break; 1915 } 1916 case KVM_GET_DIRTY_LOG: { 1917 struct kvm_dirty_log log; 1918 1919 r = -EFAULT; 1920 if (copy_from_user(&log, argp, sizeof log)) 1921 goto out; 1922 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 1923 if (r) 1924 goto out; 1925 break; 1926 } 1927 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 1928 case KVM_REGISTER_COALESCED_MMIO: { 1929 struct kvm_coalesced_mmio_zone zone; 1930 r = -EFAULT; 1931 if (copy_from_user(&zone, argp, sizeof zone)) 1932 goto out; 1933 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 1934 if (r) 1935 goto out; 1936 r = 0; 1937 break; 1938 } 1939 case KVM_UNREGISTER_COALESCED_MMIO: { 1940 struct kvm_coalesced_mmio_zone zone; 1941 r = -EFAULT; 1942 if (copy_from_user(&zone, argp, sizeof zone)) 1943 goto out; 1944 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 1945 if (r) 1946 goto out; 1947 r = 0; 1948 break; 1949 } 1950 #endif 1951 case KVM_IRQFD: { 1952 struct kvm_irqfd data; 1953 1954 r = -EFAULT; 1955 if (copy_from_user(&data, argp, sizeof data)) 1956 goto out; 1957 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags); 1958 break; 1959 } 1960 case KVM_IOEVENTFD: { 1961 struct kvm_ioeventfd data; 1962 1963 r = -EFAULT; 1964 if (copy_from_user(&data, argp, sizeof data)) 1965 goto out; 1966 r = kvm_ioeventfd(kvm, &data); 1967 break; 1968 } 1969 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 1970 case KVM_SET_BOOT_CPU_ID: 1971 r = 0; 1972 mutex_lock(&kvm->lock); 1973 if (atomic_read(&kvm->online_vcpus) != 0) 1974 r = -EBUSY; 1975 else 1976 kvm->bsp_vcpu_id = arg; 1977 mutex_unlock(&kvm->lock); 1978 break; 1979 #endif 1980 default: 1981 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 1982 if (r == -ENOTTY) 1983 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); 1984 } 1985 out: 1986 return r; 1987 } 1988 1989 #ifdef CONFIG_COMPAT 1990 struct compat_kvm_dirty_log { 1991 __u32 slot; 1992 __u32 padding1; 1993 union 
{ 1994 compat_uptr_t dirty_bitmap; /* one bit per page */ 1995 __u64 padding2; 1996 }; 1997 }; 1998 1999 static long kvm_vm_compat_ioctl(struct file *filp, 2000 unsigned int ioctl, unsigned long arg) 2001 { 2002 struct kvm *kvm = filp->private_data; 2003 int r; 2004 2005 if (kvm->mm != current->mm) 2006 return -EIO; 2007 switch (ioctl) { 2008 case KVM_GET_DIRTY_LOG: { 2009 struct compat_kvm_dirty_log compat_log; 2010 struct kvm_dirty_log log; 2011 2012 r = -EFAULT; 2013 if (copy_from_user(&compat_log, (void __user *)arg, 2014 sizeof(compat_log))) 2015 goto out; 2016 log.slot = compat_log.slot; 2017 log.padding1 = compat_log.padding1; 2018 log.padding2 = compat_log.padding2; 2019 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2020 2021 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2022 if (r) 2023 goto out; 2024 break; 2025 } 2026 default: 2027 r = kvm_vm_ioctl(filp, ioctl, arg); 2028 } 2029 2030 out: 2031 return r; 2032 } 2033 #endif 2034 2035 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2036 { 2037 struct page *page[1]; 2038 unsigned long addr; 2039 int npages; 2040 gfn_t gfn = vmf->pgoff; 2041 struct kvm *kvm = vma->vm_file->private_data; 2042 2043 addr = gfn_to_hva(kvm, gfn); 2044 if (kvm_is_error_hva(addr)) 2045 return VM_FAULT_SIGBUS; 2046 2047 npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, 2048 NULL); 2049 if (unlikely(npages != 1)) 2050 return VM_FAULT_SIGBUS; 2051 2052 vmf->page = page[0]; 2053 return 0; 2054 } 2055 2056 static const struct vm_operations_struct kvm_vm_vm_ops = { 2057 .fault = kvm_vm_fault, 2058 }; 2059 2060 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) 2061 { 2062 vma->vm_ops = &kvm_vm_vm_ops; 2063 return 0; 2064 } 2065 2066 static struct file_operations kvm_vm_fops = { 2067 .release = kvm_vm_release, 2068 .unlocked_ioctl = kvm_vm_ioctl, 2069 #ifdef CONFIG_COMPAT 2070 .compat_ioctl = kvm_vm_compat_ioctl, 2071 #endif 2072 .mmap = kvm_vm_mmap, 2073 .llseek = noop_llseek, 2074 }; 2075 2076 static int kvm_dev_ioctl_create_vm(void) 2077 { 2078 int r; 2079 struct kvm *kvm; 2080 2081 kvm = kvm_create_vm(); 2082 if (IS_ERR(kvm)) 2083 return PTR_ERR(kvm); 2084 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2085 r = kvm_coalesced_mmio_init(kvm); 2086 if (r < 0) { 2087 kvm_put_kvm(kvm); 2088 return r; 2089 } 2090 #endif 2091 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 2092 if (r < 0) 2093 kvm_put_kvm(kvm); 2094 2095 return r; 2096 } 2097 2098 static long kvm_dev_ioctl_check_extension_generic(long arg) 2099 { 2100 switch (arg) { 2101 case KVM_CAP_USER_MEMORY: 2102 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2103 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2104 #ifdef CONFIG_KVM_APIC_ARCHITECTURE 2105 case KVM_CAP_SET_BOOT_CPU_ID: 2106 #endif 2107 case KVM_CAP_INTERNAL_ERROR_DATA: 2108 return 1; 2109 #ifdef CONFIG_HAVE_KVM_IRQCHIP 2110 case KVM_CAP_IRQ_ROUTING: 2111 return KVM_MAX_IRQ_ROUTES; 2112 #endif 2113 default: 2114 break; 2115 } 2116 return kvm_dev_ioctl_check_extension(arg); 2117 } 2118 2119 static long kvm_dev_ioctl(struct file *filp, 2120 unsigned int ioctl, unsigned long arg) 2121 { 2122 long r = -EINVAL; 2123 2124 switch (ioctl) { 2125 case KVM_GET_API_VERSION: 2126 r = -EINVAL; 2127 if (arg) 2128 goto out; 2129 r = KVM_API_VERSION; 2130 break; 2131 case KVM_CREATE_VM: 2132 r = -EINVAL; 2133 if (arg) 2134 goto out; 2135 r = kvm_dev_ioctl_create_vm(); 2136 break; 2137 case KVM_CHECK_EXTENSION: 2138 r = kvm_dev_ioctl_check_extension_generic(arg); 2139 break; 2140 case 
KVM_GET_VCPU_MMAP_SIZE: 2141 r = -EINVAL; 2142 if (arg) 2143 goto out; 2144 r = PAGE_SIZE; /* struct kvm_run */ 2145 #ifdef CONFIG_X86 2146 r += PAGE_SIZE; /* pio data page */ 2147 #endif 2148 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2149 r += PAGE_SIZE; /* coalesced mmio ring page */ 2150 #endif 2151 break; 2152 case KVM_TRACE_ENABLE: 2153 case KVM_TRACE_PAUSE: 2154 case KVM_TRACE_DISABLE: 2155 r = -EOPNOTSUPP; 2156 break; 2157 default: 2158 return kvm_arch_dev_ioctl(filp, ioctl, arg); 2159 } 2160 out: 2161 return r; 2162 } 2163 2164 static struct file_operations kvm_chardev_ops = { 2165 .unlocked_ioctl = kvm_dev_ioctl, 2166 .compat_ioctl = kvm_dev_ioctl, 2167 .llseek = noop_llseek, 2168 }; 2169 2170 static struct miscdevice kvm_dev = { 2171 KVM_MINOR, 2172 "kvm", 2173 &kvm_chardev_ops, 2174 }; 2175 2176 static void hardware_enable_nolock(void *junk) 2177 { 2178 int cpu = raw_smp_processor_id(); 2179 int r; 2180 2181 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2182 return; 2183 2184 cpumask_set_cpu(cpu, cpus_hardware_enabled); 2185 2186 r = kvm_arch_hardware_enable(NULL); 2187 2188 if (r) { 2189 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2190 atomic_inc(&hardware_enable_failed); 2191 printk(KERN_INFO "kvm: enabling virtualization on " 2192 "CPU%d failed\n", cpu); 2193 } 2194 } 2195 2196 static void hardware_enable(void *junk) 2197 { 2198 raw_spin_lock(&kvm_lock); 2199 hardware_enable_nolock(junk); 2200 raw_spin_unlock(&kvm_lock); 2201 } 2202 2203 static void hardware_disable_nolock(void *junk) 2204 { 2205 int cpu = raw_smp_processor_id(); 2206 2207 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 2208 return; 2209 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 2210 kvm_arch_hardware_disable(NULL); 2211 } 2212 2213 static void hardware_disable(void *junk) 2214 { 2215 raw_spin_lock(&kvm_lock); 2216 hardware_disable_nolock(junk); 2217 raw_spin_unlock(&kvm_lock); 2218 } 2219 2220 static void hardware_disable_all_nolock(void) 2221 { 2222 BUG_ON(!kvm_usage_count); 2223 2224 kvm_usage_count--; 2225 if (!kvm_usage_count) 2226 on_each_cpu(hardware_disable_nolock, NULL, 1); 2227 } 2228 2229 static void hardware_disable_all(void) 2230 { 2231 raw_spin_lock(&kvm_lock); 2232 hardware_disable_all_nolock(); 2233 raw_spin_unlock(&kvm_lock); 2234 } 2235 2236 static int hardware_enable_all(void) 2237 { 2238 int r = 0; 2239 2240 raw_spin_lock(&kvm_lock); 2241 2242 kvm_usage_count++; 2243 if (kvm_usage_count == 1) { 2244 atomic_set(&hardware_enable_failed, 0); 2245 on_each_cpu(hardware_enable_nolock, NULL, 1); 2246 2247 if (atomic_read(&hardware_enable_failed)) { 2248 hardware_disable_all_nolock(); 2249 r = -EBUSY; 2250 } 2251 } 2252 2253 raw_spin_unlock(&kvm_lock); 2254 2255 return r; 2256 } 2257 2258 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 2259 void *v) 2260 { 2261 int cpu = (long)v; 2262 2263 if (!kvm_usage_count) 2264 return NOTIFY_OK; 2265 2266 val &= ~CPU_TASKS_FROZEN; 2267 switch (val) { 2268 case CPU_DYING: 2269 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2270 cpu); 2271 hardware_disable(NULL); 2272 break; 2273 case CPU_STARTING: 2274 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2275 cpu); 2276 hardware_enable(NULL); 2277 break; 2278 } 2279 return NOTIFY_OK; 2280 } 2281 2282 2283 asmlinkage void kvm_spurious_fault(void) 2284 { 2285 /* Fault while not rebooting. We want the trace. 
asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting. We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some BIOSes (well, at least mine) hang on reboot if the CPU is
	 * still in VMX root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the system
	 * shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

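/*
 * Note on the bus update scheme used by the register/unregister helpers
 * (descriptive comment only): readers walk kvm->buses[] under SRCU, so
 * writers never modify a bus in place.  They build a modified copy, publish
 * it with rcu_assign_pointer(), wait for readers with
 * synchronize_srcu_expedited(), and only then free the old bus.
 */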
/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	raw_spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	raw_spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

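/*
 * The preempt notifier hooks above keep per-cpu vcpu state consistent with
 * scheduling (descriptive comment only): kvm_sched_out() releases the vcpu's
 * hardware context when its thread is preempted, and kvm_sched_in() reloads
 * it on whichever CPU the thread runs on next, mirroring what vcpu_load()
 * and vcpu_put() do around ioctls.
 */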
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);
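/*
 * Illustrative arch-module wiring (a sketch only, not part of this file;
 * vmx_init/vmx_exit, vmx_x86_ops and struct vcpu_vmx stand in for whatever
 * the architecture module actually defines):
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 *
 *	static void __exit vmx_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(vmx_init);
 *	module_exit(vmx_exit);
 */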