/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/* Default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);

/* Default resets per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations *stat_fops_per_vm[];

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(kvm_pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put()
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
EXPORT_SYMBOL_GPL(vcpu_put);

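/*
 * For illustration: a typical caller (e.g. an ioctl handler) brackets all
 * access to vCPU state with vcpu_load()/vcpu_put().  This is a sketch of a
 * hypothetical caller, not a definition from this file:
 *
 *	r = vcpu_load(vcpu);
 *	if (r)
 *		return r;		// interrupted by a fatal signal
 *	...access or run the vCPU...
 *	vcpu_put(vcpu);
 */
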
static void ack_flush(void *_completed)
{
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode. */
		smp_mb__after_atomic();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_swait_queue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	kvm_arch_mmu_notifier_invalidate_page(kvm, address);

	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

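/*
 * For illustration: the consumer of mmu_notifier_seq/mmu_notifier_count is
 * the arch page-fault path, which follows a pattern like the sketch below
 * (an assumed caller, with details elided):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(...);			// may sleep, no mmu_lock held
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))	// seq changed or count != 0
 *		goto retry;			// pfn may already be stale
 *	...install the spte...
 *	spin_unlock(&kvm->mmu_lock);
 */
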
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		return NULL;

	/*
	 * Init kvm generation close to the maximum to easily test the
	 * code of handling generation number wrap-around.
	 */
	slots->generation = -150;
	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name,
						 kvm_debugfs_dir);
	if (!kvm->debugfs_dentry)
		return -ENOMEM;

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->offset = p->offset;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		if (!debugfs_create_file(p->name, 0444,
					 kvm->debugfs_dentry,
					 stat_data,
					 stat_fops_per_vm[p->kind]))
			return -ENOMEM;
	}
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	atomic_inc(&current->mm->mm_count);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm->memslots[i] = kvm_alloc_memslots();
		if (!kvm->memslots[i])
			goto out_err_no_srcu;
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

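/*
 * For illustration: the doubled allocation leaves room for a second bitmap
 * directly after the first.  kvm_get_dirty_log_protect() below uses that
 * second half as a transfer buffer, so dirty bits can be snapshotted and
 * cleared atomically before being copied to userspace.  A sketch of the
 * resulting layout (assumed, derived from the code below):
 *
 *	dirty_bitmap:			[ live bits, written by vCPUs  ]
 *	dirty_bitmap + n / sizeof(long):[ snapshot handed to userspace ]
 */
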
/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so that binary search can be used to look up a GFN.
 * The sorting algorithm takes advantage of having an initially
 * sorted array and a known changed memslot position.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);

	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 */
	slots->generation++;

	kvm_arch_memslots_updated(kvm, slots);

	return old_memslots;
}

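/*
 * For illustration: readers traverse memslots under SRCU, so the pointer
 * swap above only has to wait for in-flight readers rather than take a
 * lock.  A reader-side sketch (an assumed caller, with details elided):
 *
 *	idx = srcu_read_lock(&kvm->srcu);
 *	slots = kvm_memslots(kvm);		// srcu_dereference inside
 *	slot = __gfn_to_memslot(slots, gfn);
 *	...use slot; cached translations record slots->generation...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */
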
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	int as_id, id;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	new = old = *slot;

	new.id = id;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else {
		if (!old.npages)
			goto out;

		change = KVM_MR_DELETE;
		new.base_gfn = 0;
		new.flags = 0;
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == id))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		goto out_free;
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		return r;
	}

	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_memslot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}

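/*
 * For illustration: userspace reaches the functions above through the
 * KVM_SET_USER_MEMORY_REGION vm ioctl, with the address space id packed
 * into the high 16 bits of the slot field.  A hypothetical caller:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = (as_id << 16) | id,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = gpa,		// page-aligned
 *		.memory_size = size,		// page-aligned, 0 deletes
 *		.userspace_addr = (__u64)hva,	// e.g. from mmap()
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
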
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty write protect them for next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * Keep in mind that VCPU threads can write to the bitmap concurrently.
 * So, to avoid losing track of dirty pages we keep the following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return, the caller flushes TLBs if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before, and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

/*
 * If writable is set to false, the hva returned by this function is only
 * allowed to be read.
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int get_user_page_nowait(unsigned long start, int write,
		struct page **page)
{
	int flags = FOLL_NOWAIT | FOLL_HWPOISON;

	if (write)
		flags |= FOLL_WRITE;

	return get_user_pages(start, 1, flags, page, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

	rc = get_user_pages(addr, 1, flags, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The atomic path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	if (!(async || atomic))
		return false;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address.
 * 1 indicates success, -errno is returned if an error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else {
		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;

		if (write_fault)
			flags |= FOLL_WRITE;

		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   page, flags);
	}
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool *async,
			       bool write_fault, kvm_pfn_t *p_pfn)
{
	unsigned long pfn;
	int r;

	r = follow_pfn(vma, addr, &pfn);
	if (r) {
		/*
		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
		 * not call the fault handler, so do it here.
		 */
		bool unlocked = false;
		r = fixup_user_fault(current, current->mm, addr,
				     (write_fault ? FAULT_FLAG_WRITE : 0),
				     &unlocked);
		if (unlocked)
			return -EAGAIN;
		if (r)
			return r;

		r = follow_pfn(vma, addr, &pfn);
		if (r)
			return r;

	}


	/*
	 * Get a reference here because callers of *hva_to_pfn* and
	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
	 * simply do nothing for reserved pfns.
	 *
	 * Whoever called remap_pfn_range is also going to call e.g.
	 * unmap_mapping_range before the underlying pages are freed,
	 * causing a call to our MMU notifier.
	 */
	kvm_get_pfn(pfn);

	*p_pfn = pfn;
	return 0;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the function must not sleep
 * @async: whether this function needs to wait for IO to complete if the
 *	   host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable != NULL, in which case *@writable
 *     tells the caller whether the mapping is writable.
 */
static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	kvm_pfn_t pfn = 0;
	int npages, r;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

retry:
	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
		if (r == -EAGAIN)
			goto retry;
		if (r < 0)
			pfn = KVM_PFN_ERR_FAULT;
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

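/*
 * For illustration, the @write_fault/@writable contract in caller terms.
 * The sketch below is a hypothetical read fault that is happy to accept a
 * read-only mapping but takes write permission when it is free:
 *
 *	bool writable;
 *	kvm_pfn_t pfn = hva_to_pfn(addr, false, NULL,
 *				   false,	// not a write fault
 *				   &writable);	// but upgrade if possible
 *	// writable == true means the page was mapped writable anyway,
 *	// sparing a second fault when the guest later writes to it.
 */
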
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD) {
		if (writable)
			*writable = false;
		return KVM_PFN_ERR_RO_FAULT;
	}

	if (kvm_is_error_hva(addr)) {
		if (writable)
			*writable = false;
		return KVM_PFN_NOSLOT;
	}

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
{
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_reserved_pfn(pfn)) {
		WARN_ON(1);
		return KVM_ERR_PTR_BAD_PAGE;
	}

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	kvm_pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(gfn_to_page);

struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	kvm_pfn_t pfn;

	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(kvm_pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

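/*
 * For illustration, the usual pin/use/release pattern built from the
 * helpers above (a sketch of a hypothetical caller):
 *
 *	struct page *page = gfn_to_page(kvm, gfn);
 *	if (is_error_page(page))
 *		return -EFAULT;
 *	...read or modify the page contents...
 *	kvm_release_page_dirty(page);	// or _clean() if only read
 */
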
static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}

void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(kvm_pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);

static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
				   void *data, int offset, unsigned long len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
			          const void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(memslot, gfn);
	return 0;
}

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
		         unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);

1959 */ 1960 while (start_gfn <= end_gfn) { 1961 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1962 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1963 &nr_pages_avail); 1964 if (kvm_is_error_hva(ghc->hva)) 1965 return -EFAULT; 1966 start_gfn += nr_pages_avail; 1967 } 1968 /* Use the slow path for cross page reads and writes. */ 1969 ghc->memslot = NULL; 1970 } 1971 return 0; 1972 } 1973 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1974 1975 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1976 void *data, int offset, unsigned long len) 1977 { 1978 struct kvm_memslots *slots = kvm_memslots(kvm); 1979 int r; 1980 gpa_t gpa = ghc->gpa + offset; 1981 1982 BUG_ON(len + offset > ghc->len); 1983 1984 if (slots->generation != ghc->generation) 1985 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1986 1987 if (unlikely(!ghc->memslot)) 1988 return kvm_write_guest(kvm, gpa, data, len); 1989 1990 if (kvm_is_error_hva(ghc->hva)) 1991 return -EFAULT; 1992 1993 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 1994 if (r) 1995 return -EFAULT; 1996 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT); 1997 1998 return 0; 1999 } 2000 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 2001 2002 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2003 void *data, unsigned long len) 2004 { 2005 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 2006 } 2007 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 2008 2009 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 2010 void *data, unsigned long len) 2011 { 2012 struct kvm_memslots *slots = kvm_memslots(kvm); 2013 int r; 2014 2015 BUG_ON(len > ghc->len); 2016 2017 if (slots->generation != ghc->generation) 2018 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 2019 2020 if (unlikely(!ghc->memslot)) 2021 return kvm_read_guest(kvm, ghc->gpa, data, len); 2022 2023 if (kvm_is_error_hva(ghc->hva)) 2024 return -EFAULT; 2025 2026 r = __copy_from_user(data, (void __user *)ghc->hva, len); 2027 if (r) 2028 return -EFAULT; 2029 2030 return 0; 2031 } 2032 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 2033 2034 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 2035 { 2036 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 2037 2038 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 2039 } 2040 EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 2041 2042 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 2043 { 2044 gfn_t gfn = gpa >> PAGE_SHIFT; 2045 int seg; 2046 int offset = offset_in_page(gpa); 2047 int ret; 2048 2049 while ((seg = next_segment(len, offset)) != 0) { 2050 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 2051 if (ret < 0) 2052 return ret; 2053 offset = 0; 2054 len -= seg; 2055 ++gfn; 2056 } 2057 return 0; 2058 } 2059 EXPORT_SYMBOL_GPL(kvm_clear_guest); 2060 2061 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, 2062 gfn_t gfn) 2063 { 2064 if (memslot && memslot->dirty_bitmap) { 2065 unsigned long rel_gfn = gfn - memslot->base_gfn; 2066 2067 set_bit_le(rel_gfn, memslot->dirty_bitmap); 2068 } 2069 } 2070 2071 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 2072 { 2073 struct kvm_memory_slot *memslot; 2074 2075 memslot = gfn_to_memslot(kvm, gfn); 2076 mark_page_dirty_in_slot(memslot, gfn); 2077 } 2078 EXPORT_SYMBOL_GPL(mark_page_dirty); 2079 2080 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 2081 { 2082 struct kvm_memory_slot *memslot; 2083 
2084 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2085 mark_page_dirty_in_slot(memslot, gfn); 2086 } 2087 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 2088 2089 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 2090 { 2091 unsigned int old, val, grow; 2092 2093 old = val = vcpu->halt_poll_ns; 2094 grow = READ_ONCE(halt_poll_ns_grow); 2095 /* 10us base */ 2096 if (val == 0 && grow) 2097 val = 10000; 2098 else 2099 val *= grow; 2100 2101 if (val > halt_poll_ns) 2102 val = halt_poll_ns; 2103 2104 vcpu->halt_poll_ns = val; 2105 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 2106 } 2107 2108 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 2109 { 2110 unsigned int old, val, shrink; 2111 2112 old = val = vcpu->halt_poll_ns; 2113 shrink = READ_ONCE(halt_poll_ns_shrink); 2114 if (shrink == 0) 2115 val = 0; 2116 else 2117 val /= shrink; 2118 2119 vcpu->halt_poll_ns = val; 2120 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 2121 } 2122 2123 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 2124 { 2125 if (kvm_arch_vcpu_runnable(vcpu)) { 2126 kvm_make_request(KVM_REQ_UNHALT, vcpu); 2127 return -EINTR; 2128 } 2129 if (kvm_cpu_has_pending_timer(vcpu)) 2130 return -EINTR; 2131 if (signal_pending(current)) 2132 return -EINTR; 2133 2134 return 0; 2135 } 2136 2137 /* 2138 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 2139 */ 2140 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 2141 { 2142 ktime_t start, cur; 2143 DECLARE_SWAITQUEUE(wait); 2144 bool waited = false; 2145 u64 block_ns; 2146 2147 start = cur = ktime_get(); 2148 if (vcpu->halt_poll_ns) { 2149 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2150 2151 ++vcpu->stat.halt_attempted_poll; 2152 do { 2153 /* 2154 * This sets KVM_REQ_UNHALT if an interrupt 2155 * arrives. 2156 */ 2157 if (kvm_vcpu_check_block(vcpu) < 0) { 2158 ++vcpu->stat.halt_successful_poll; 2159 if (!vcpu_valid_wakeup(vcpu)) 2160 ++vcpu->stat.halt_poll_invalid; 2161 goto out; 2162 } 2163 cur = ktime_get(); 2164 } while (single_task_running() && ktime_before(cur, stop)); 2165 } 2166 2167 kvm_arch_vcpu_blocking(vcpu); 2168 2169 for (;;) { 2170 prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 2171 2172 if (kvm_vcpu_check_block(vcpu) < 0) 2173 break; 2174 2175 waited = true; 2176 schedule(); 2177 } 2178 2179 finish_swait(&vcpu->wq, &wait); 2180 cur = ktime_get(); 2181 2182 kvm_arch_vcpu_unblocking(vcpu); 2183 out: 2184 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2185 2186 if (!vcpu_valid_wakeup(vcpu)) 2187 shrink_halt_poll_ns(vcpu); 2188 else if (halt_poll_ns) { 2189 if (block_ns <= vcpu->halt_poll_ns) 2190 ; 2191 /* we had a long block, shrink polling */ 2192 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) 2193 shrink_halt_poll_ns(vcpu); 2194 /* we had a short halt and our poll time is too small */ 2195 else if (vcpu->halt_poll_ns < halt_poll_ns && 2196 block_ns < halt_poll_ns) 2197 grow_halt_poll_ns(vcpu); 2198 } else 2199 vcpu->halt_poll_ns = 0; 2200 2201 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 2202 kvm_arch_vcpu_block_finish(vcpu); 2203 } 2204 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2205 2206 #ifndef CONFIG_S390 2207 void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 2208 { 2209 struct swait_queue_head *wqp; 2210 2211 wqp = kvm_arch_vcpu_wq(vcpu); 2212 if (swait_active(wqp)) { 2213 swake_up(wqp); 2214 ++vcpu->stat.halt_wakeup; 2215 } 2216 2217 } 2218 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 2219 2220 /* 2221 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 
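 *
 * Illustrative pairing (editorial sketch; KVM_REQ_FOO is a placeholder
 * request bit, not a real one): callers typically queue work for the
 * vCPU and then kick it so the request is noticed promptly:
 *
 *	kvm_make_request(KVM_REQ_FOO, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * The wakeup below handles a halted vCPU; the IPI handles one that is
 * currently running in guest mode.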
2222 */ 2223 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2224 { 2225 int me; 2226 int cpu = vcpu->cpu; 2227 2228 kvm_vcpu_wake_up(vcpu); 2229 me = get_cpu(); 2230 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2231 if (kvm_arch_vcpu_should_kick(vcpu)) 2232 smp_send_reschedule(cpu); 2233 put_cpu(); 2234 } 2235 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2236 #endif /* !CONFIG_S390 */ 2237 2238 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2239 { 2240 struct pid *pid; 2241 struct task_struct *task = NULL; 2242 int ret = 0; 2243 2244 rcu_read_lock(); 2245 pid = rcu_dereference(target->pid); 2246 if (pid) 2247 task = get_pid_task(pid, PIDTYPE_PID); 2248 rcu_read_unlock(); 2249 if (!task) 2250 return ret; 2251 ret = yield_to(task, 1); 2252 put_task_struct(task); 2253 2254 return ret; 2255 } 2256 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2257 2258 /* 2259 * Helper that checks whether a VCPU is eligible for directed yield. 2260 * Most eligible candidate to yield is decided by following heuristics: 2261 * 2262 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 2263 * (preempted lock holder), indicated by @in_spin_loop. 2264 * Set at the beiginning and cleared at the end of interception/PLE handler. 2265 * 2266 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 2267 * chance last time (mostly it has become eligible now since we have probably 2268 * yielded to lockholder in last iteration. This is done by toggling 2269 * @dy_eligible each time a VCPU checked for eligibility.) 2270 * 2271 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 2272 * to preempted lock-holder could result in wrong VCPU selection and CPU 2273 * burning. Giving priority for a potential lock-holder increases lock 2274 * progress. 2275 * 2276 * Since algorithm is based on heuristics, accessing another VCPU data without 2277 * locking does not harm. It may result in trying to yield to same VCPU, fail 2278 * and continue with next VCPU and so on. 2279 */ 2280 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2281 { 2282 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2283 bool eligible; 2284 2285 eligible = !vcpu->spin_loop.in_spin_loop || 2286 vcpu->spin_loop.dy_eligible; 2287 2288 if (vcpu->spin_loop.in_spin_loop) 2289 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2290 2291 return eligible; 2292 #else 2293 return true; 2294 #endif 2295 } 2296 2297 void kvm_vcpu_on_spin(struct kvm_vcpu *me) 2298 { 2299 struct kvm *kvm = me->kvm; 2300 struct kvm_vcpu *vcpu; 2301 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2302 int yielded = 0; 2303 int try = 3; 2304 int pass; 2305 int i; 2306 2307 kvm_vcpu_set_in_spin_loop(me, true); 2308 /* 2309 * We boost the priority of a VCPU that is runnable but not 2310 * currently running, because it got preempted by something 2311 * else and called schedule in __vcpu_run. Hopefully that 2312 * VCPU is holding the lock that we need and will release it. 2313 * We approximate round-robin by starting at the last boosted VCPU. 
2314 */ 2315 for (pass = 0; pass < 2 && !yielded && try; pass++) { 2316 kvm_for_each_vcpu(i, vcpu, kvm) { 2317 if (!pass && i <= last_boosted_vcpu) { 2318 i = last_boosted_vcpu; 2319 continue; 2320 } else if (pass && i > last_boosted_vcpu) 2321 break; 2322 if (!ACCESS_ONCE(vcpu->preempted)) 2323 continue; 2324 if (vcpu == me) 2325 continue; 2326 if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2327 continue; 2328 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2329 continue; 2330 2331 yielded = kvm_vcpu_yield_to(vcpu); 2332 if (yielded > 0) { 2333 kvm->last_boosted_vcpu = i; 2334 break; 2335 } else if (yielded < 0) { 2336 try--; 2337 if (!try) 2338 break; 2339 } 2340 } 2341 } 2342 kvm_vcpu_set_in_spin_loop(me, false); 2343 2344 /* Ensure vcpu is not eligible during next spinloop */ 2345 kvm_vcpu_set_dy_eligible(me, false); 2346 } 2347 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 2348 2349 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2350 { 2351 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 2352 struct page *page; 2353 2354 if (vmf->pgoff == 0) 2355 page = virt_to_page(vcpu->run); 2356 #ifdef CONFIG_X86 2357 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 2358 page = virt_to_page(vcpu->arch.pio_data); 2359 #endif 2360 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2361 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 2362 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 2363 #endif 2364 else 2365 return kvm_arch_vcpu_fault(vcpu, vmf); 2366 get_page(page); 2367 vmf->page = page; 2368 return 0; 2369 } 2370 2371 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 2372 .fault = kvm_vcpu_fault, 2373 }; 2374 2375 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 2376 { 2377 vma->vm_ops = &kvm_vcpu_vm_ops; 2378 return 0; 2379 } 2380 2381 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 2382 { 2383 struct kvm_vcpu *vcpu = filp->private_data; 2384 2385 debugfs_remove_recursive(vcpu->debugfs_dentry); 2386 kvm_put_kvm(vcpu->kvm); 2387 return 0; 2388 } 2389 2390 static struct file_operations kvm_vcpu_fops = { 2391 .release = kvm_vcpu_release, 2392 .unlocked_ioctl = kvm_vcpu_ioctl, 2393 #ifdef CONFIG_KVM_COMPAT 2394 .compat_ioctl = kvm_vcpu_compat_ioctl, 2395 #endif 2396 .mmap = kvm_vcpu_mmap, 2397 .llseek = noop_llseek, 2398 }; 2399 2400 /* 2401 * Allocates an inode for the vcpu. 2402 */ 2403 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 2404 { 2405 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2406 } 2407 2408 static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 2409 { 2410 char dir_name[ITOA_MAX_LEN * 2]; 2411 int ret; 2412 2413 if (!kvm_arch_has_vcpu_debugfs()) 2414 return 0; 2415 2416 if (!debugfs_initialized()) 2417 return 0; 2418 2419 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 2420 vcpu->debugfs_dentry = debugfs_create_dir(dir_name, 2421 vcpu->kvm->debugfs_dentry); 2422 if (!vcpu->debugfs_dentry) 2423 return -ENOMEM; 2424 2425 ret = kvm_arch_create_vcpu_debugfs(vcpu); 2426 if (ret < 0) { 2427 debugfs_remove_recursive(vcpu->debugfs_dentry); 2428 return ret; 2429 } 2430 2431 return 0; 2432 } 2433 2434 /* 2435 * Creates some virtual cpus. Good luck creating more than one. 
2436 */ 2437 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 2438 { 2439 int r; 2440 struct kvm_vcpu *vcpu; 2441 2442 if (id >= KVM_MAX_VCPU_ID) 2443 return -EINVAL; 2444 2445 mutex_lock(&kvm->lock); 2446 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 2447 mutex_unlock(&kvm->lock); 2448 return -EINVAL; 2449 } 2450 2451 kvm->created_vcpus++; 2452 mutex_unlock(&kvm->lock); 2453 2454 vcpu = kvm_arch_vcpu_create(kvm, id); 2455 if (IS_ERR(vcpu)) { 2456 r = PTR_ERR(vcpu); 2457 goto vcpu_decrement; 2458 } 2459 2460 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2461 2462 r = kvm_arch_vcpu_setup(vcpu); 2463 if (r) 2464 goto vcpu_destroy; 2465 2466 r = kvm_create_vcpu_debugfs(vcpu); 2467 if (r) 2468 goto vcpu_destroy; 2469 2470 mutex_lock(&kvm->lock); 2471 if (kvm_get_vcpu_by_id(kvm, id)) { 2472 r = -EEXIST; 2473 goto unlock_vcpu_destroy; 2474 } 2475 2476 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 2477 2478 /* Now it's all set up, let userspace reach it */ 2479 kvm_get_kvm(kvm); 2480 r = create_vcpu_fd(vcpu); 2481 if (r < 0) { 2482 kvm_put_kvm(kvm); 2483 goto unlock_vcpu_destroy; 2484 } 2485 2486 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2487 2488 /* 2489 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 2490 * before kvm->online_vcpu's incremented value. 2491 */ 2492 smp_wmb(); 2493 atomic_inc(&kvm->online_vcpus); 2494 2495 mutex_unlock(&kvm->lock); 2496 kvm_arch_vcpu_postcreate(vcpu); 2497 return r; 2498 2499 unlock_vcpu_destroy: 2500 mutex_unlock(&kvm->lock); 2501 debugfs_remove_recursive(vcpu->debugfs_dentry); 2502 vcpu_destroy: 2503 kvm_arch_vcpu_destroy(vcpu); 2504 vcpu_decrement: 2505 mutex_lock(&kvm->lock); 2506 kvm->created_vcpus--; 2507 mutex_unlock(&kvm->lock); 2508 return r; 2509 } 2510 2511 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2512 { 2513 if (sigset) { 2514 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2515 vcpu->sigset_active = 1; 2516 vcpu->sigset = *sigset; 2517 } else 2518 vcpu->sigset_active = 0; 2519 return 0; 2520 } 2521 2522 static long kvm_vcpu_ioctl(struct file *filp, 2523 unsigned int ioctl, unsigned long arg) 2524 { 2525 struct kvm_vcpu *vcpu = filp->private_data; 2526 void __user *argp = (void __user *)arg; 2527 int r; 2528 struct kvm_fpu *fpu = NULL; 2529 struct kvm_sregs *kvm_sregs = NULL; 2530 2531 if (vcpu->kvm->mm != current->mm) 2532 return -EIO; 2533 2534 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2535 return -EINVAL; 2536 2537 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2538 /* 2539 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2540 * so vcpu_load() would break it. 2541 */ 2542 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) 2543 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2544 #endif 2545 2546 2547 r = vcpu_load(vcpu); 2548 if (r) 2549 return r; 2550 switch (ioctl) { 2551 case KVM_RUN: 2552 r = -EINVAL; 2553 if (arg) 2554 goto out; 2555 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 2556 /* The thread running this VCPU changed. 
*/ 2557 struct pid *oldpid = vcpu->pid; 2558 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2559 2560 rcu_assign_pointer(vcpu->pid, newpid); 2561 if (oldpid) 2562 synchronize_rcu(); 2563 put_pid(oldpid); 2564 } 2565 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2566 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2567 break; 2568 case KVM_GET_REGS: { 2569 struct kvm_regs *kvm_regs; 2570 2571 r = -ENOMEM; 2572 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2573 if (!kvm_regs) 2574 goto out; 2575 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2576 if (r) 2577 goto out_free1; 2578 r = -EFAULT; 2579 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2580 goto out_free1; 2581 r = 0; 2582 out_free1: 2583 kfree(kvm_regs); 2584 break; 2585 } 2586 case KVM_SET_REGS: { 2587 struct kvm_regs *kvm_regs; 2588 2589 r = -ENOMEM; 2590 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2591 if (IS_ERR(kvm_regs)) { 2592 r = PTR_ERR(kvm_regs); 2593 goto out; 2594 } 2595 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2596 kfree(kvm_regs); 2597 break; 2598 } 2599 case KVM_GET_SREGS: { 2600 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2601 r = -ENOMEM; 2602 if (!kvm_sregs) 2603 goto out; 2604 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2605 if (r) 2606 goto out; 2607 r = -EFAULT; 2608 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2609 goto out; 2610 r = 0; 2611 break; 2612 } 2613 case KVM_SET_SREGS: { 2614 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2615 if (IS_ERR(kvm_sregs)) { 2616 r = PTR_ERR(kvm_sregs); 2617 kvm_sregs = NULL; 2618 goto out; 2619 } 2620 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2621 break; 2622 } 2623 case KVM_GET_MP_STATE: { 2624 struct kvm_mp_state mp_state; 2625 2626 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2627 if (r) 2628 goto out; 2629 r = -EFAULT; 2630 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 2631 goto out; 2632 r = 0; 2633 break; 2634 } 2635 case KVM_SET_MP_STATE: { 2636 struct kvm_mp_state mp_state; 2637 2638 r = -EFAULT; 2639 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 2640 goto out; 2641 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2642 break; 2643 } 2644 case KVM_TRANSLATE: { 2645 struct kvm_translation tr; 2646 2647 r = -EFAULT; 2648 if (copy_from_user(&tr, argp, sizeof(tr))) 2649 goto out; 2650 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2651 if (r) 2652 goto out; 2653 r = -EFAULT; 2654 if (copy_to_user(argp, &tr, sizeof(tr))) 2655 goto out; 2656 r = 0; 2657 break; 2658 } 2659 case KVM_SET_GUEST_DEBUG: { 2660 struct kvm_guest_debug dbg; 2661 2662 r = -EFAULT; 2663 if (copy_from_user(&dbg, argp, sizeof(dbg))) 2664 goto out; 2665 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2666 break; 2667 } 2668 case KVM_SET_SIGNAL_MASK: { 2669 struct kvm_signal_mask __user *sigmask_arg = argp; 2670 struct kvm_signal_mask kvm_sigmask; 2671 sigset_t sigset, *p; 2672 2673 p = NULL; 2674 if (argp) { 2675 r = -EFAULT; 2676 if (copy_from_user(&kvm_sigmask, argp, 2677 sizeof(kvm_sigmask))) 2678 goto out; 2679 r = -EINVAL; 2680 if (kvm_sigmask.len != sizeof(sigset)) 2681 goto out; 2682 r = -EFAULT; 2683 if (copy_from_user(&sigset, sigmask_arg->sigset, 2684 sizeof(sigset))) 2685 goto out; 2686 p = &sigset; 2687 } 2688 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2689 break; 2690 } 2691 case KVM_GET_FPU: { 2692 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2693 r = -ENOMEM; 2694 if (!fpu) 2695 goto out; 2696 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2697 if 
(r) 2698 goto out; 2699 r = -EFAULT; 2700 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2701 goto out; 2702 r = 0; 2703 break; 2704 } 2705 case KVM_SET_FPU: { 2706 fpu = memdup_user(argp, sizeof(*fpu)); 2707 if (IS_ERR(fpu)) { 2708 r = PTR_ERR(fpu); 2709 fpu = NULL; 2710 goto out; 2711 } 2712 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2713 break; 2714 } 2715 default: 2716 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2717 } 2718 out: 2719 vcpu_put(vcpu); 2720 kfree(fpu); 2721 kfree(kvm_sregs); 2722 return r; 2723 } 2724 2725 #ifdef CONFIG_KVM_COMPAT 2726 static long kvm_vcpu_compat_ioctl(struct file *filp, 2727 unsigned int ioctl, unsigned long arg) 2728 { 2729 struct kvm_vcpu *vcpu = filp->private_data; 2730 void __user *argp = compat_ptr(arg); 2731 int r; 2732 2733 if (vcpu->kvm->mm != current->mm) 2734 return -EIO; 2735 2736 switch (ioctl) { 2737 case KVM_SET_SIGNAL_MASK: { 2738 struct kvm_signal_mask __user *sigmask_arg = argp; 2739 struct kvm_signal_mask kvm_sigmask; 2740 compat_sigset_t csigset; 2741 sigset_t sigset; 2742 2743 if (argp) { 2744 r = -EFAULT; 2745 if (copy_from_user(&kvm_sigmask, argp, 2746 sizeof(kvm_sigmask))) 2747 goto out; 2748 r = -EINVAL; 2749 if (kvm_sigmask.len != sizeof(csigset)) 2750 goto out; 2751 r = -EFAULT; 2752 if (copy_from_user(&csigset, sigmask_arg->sigset, 2753 sizeof(csigset))) 2754 goto out; 2755 sigset_from_compat(&sigset, &csigset); 2756 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2757 } else 2758 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2759 break; 2760 } 2761 default: 2762 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2763 } 2764 2765 out: 2766 return r; 2767 } 2768 #endif 2769 2770 static int kvm_device_ioctl_attr(struct kvm_device *dev, 2771 int (*accessor)(struct kvm_device *dev, 2772 struct kvm_device_attr *attr), 2773 unsigned long arg) 2774 { 2775 struct kvm_device_attr attr; 2776 2777 if (!accessor) 2778 return -EPERM; 2779 2780 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2781 return -EFAULT; 2782 2783 return accessor(dev, &attr); 2784 } 2785 2786 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2787 unsigned long arg) 2788 { 2789 struct kvm_device *dev = filp->private_data; 2790 2791 switch (ioctl) { 2792 case KVM_SET_DEVICE_ATTR: 2793 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2794 case KVM_GET_DEVICE_ATTR: 2795 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2796 case KVM_HAS_DEVICE_ATTR: 2797 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2798 default: 2799 if (dev->ops->ioctl) 2800 return dev->ops->ioctl(dev, ioctl, arg); 2801 2802 return -ENOTTY; 2803 } 2804 } 2805 2806 static int kvm_device_release(struct inode *inode, struct file *filp) 2807 { 2808 struct kvm_device *dev = filp->private_data; 2809 struct kvm *kvm = dev->kvm; 2810 2811 kvm_put_kvm(kvm); 2812 return 0; 2813 } 2814 2815 static const struct file_operations kvm_device_fops = { 2816 .unlocked_ioctl = kvm_device_ioctl, 2817 #ifdef CONFIG_KVM_COMPAT 2818 .compat_ioctl = kvm_device_ioctl, 2819 #endif 2820 .release = kvm_device_release, 2821 }; 2822 2823 struct kvm_device *kvm_device_from_filp(struct file *filp) 2824 { 2825 if (filp->f_op != &kvm_device_fops) 2826 return NULL; 2827 2828 return filp->private_data; 2829 } 2830 2831 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2832 #ifdef CONFIG_KVM_MPIC 2833 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2834 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2835 #endif 2836 2837 #ifdef CONFIG_KVM_XICS 2838 [KVM_DEV_TYPE_XICS] 
= &kvm_xics_ops, 2839 #endif 2840 }; 2841 2842 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2843 { 2844 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2845 return -ENOSPC; 2846 2847 if (kvm_device_ops_table[type] != NULL) 2848 return -EEXIST; 2849 2850 kvm_device_ops_table[type] = ops; 2851 return 0; 2852 } 2853 2854 void kvm_unregister_device_ops(u32 type) 2855 { 2856 if (kvm_device_ops_table[type] != NULL) 2857 kvm_device_ops_table[type] = NULL; 2858 } 2859 2860 static int kvm_ioctl_create_device(struct kvm *kvm, 2861 struct kvm_create_device *cd) 2862 { 2863 struct kvm_device_ops *ops = NULL; 2864 struct kvm_device *dev; 2865 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2866 int ret; 2867 2868 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2869 return -ENODEV; 2870 2871 ops = kvm_device_ops_table[cd->type]; 2872 if (ops == NULL) 2873 return -ENODEV; 2874 2875 if (test) 2876 return 0; 2877 2878 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2879 if (!dev) 2880 return -ENOMEM; 2881 2882 dev->ops = ops; 2883 dev->kvm = kvm; 2884 2885 mutex_lock(&kvm->lock); 2886 ret = ops->create(dev, cd->type); 2887 if (ret < 0) { 2888 mutex_unlock(&kvm->lock); 2889 kfree(dev); 2890 return ret; 2891 } 2892 list_add(&dev->vm_node, &kvm->devices); 2893 mutex_unlock(&kvm->lock); 2894 2895 if (ops->init) 2896 ops->init(dev); 2897 2898 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2899 if (ret < 0) { 2900 mutex_lock(&kvm->lock); 2901 list_del(&dev->vm_node); 2902 mutex_unlock(&kvm->lock); 2903 ops->destroy(dev); 2904 return ret; 2905 } 2906 2907 kvm_get_kvm(kvm); 2908 cd->fd = ret; 2909 return 0; 2910 } 2911 2912 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2913 { 2914 switch (arg) { 2915 case KVM_CAP_USER_MEMORY: 2916 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2917 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2918 case KVM_CAP_INTERNAL_ERROR_DATA: 2919 #ifdef CONFIG_HAVE_KVM_MSI 2920 case KVM_CAP_SIGNAL_MSI: 2921 #endif 2922 #ifdef CONFIG_HAVE_KVM_IRQFD 2923 case KVM_CAP_IRQFD: 2924 case KVM_CAP_IRQFD_RESAMPLE: 2925 #endif 2926 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 2927 case KVM_CAP_CHECK_EXTENSION_VM: 2928 return 1; 2929 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2930 case KVM_CAP_IRQ_ROUTING: 2931 return KVM_MAX_IRQ_ROUTES; 2932 #endif 2933 #if KVM_ADDRESS_SPACE_NUM > 1 2934 case KVM_CAP_MULTI_ADDRESS_SPACE: 2935 return KVM_ADDRESS_SPACE_NUM; 2936 #endif 2937 case KVM_CAP_MAX_VCPU_ID: 2938 return KVM_MAX_VCPU_ID; 2939 default: 2940 break; 2941 } 2942 return kvm_vm_ioctl_check_extension(kvm, arg); 2943 } 2944 2945 static long kvm_vm_ioctl(struct file *filp, 2946 unsigned int ioctl, unsigned long arg) 2947 { 2948 struct kvm *kvm = filp->private_data; 2949 void __user *argp = (void __user *)arg; 2950 int r; 2951 2952 if (kvm->mm != current->mm) 2953 return -EIO; 2954 switch (ioctl) { 2955 case KVM_CREATE_VCPU: 2956 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2957 break; 2958 case KVM_SET_USER_MEMORY_REGION: { 2959 struct kvm_userspace_memory_region kvm_userspace_mem; 2960 2961 r = -EFAULT; 2962 if (copy_from_user(&kvm_userspace_mem, argp, 2963 sizeof(kvm_userspace_mem))) 2964 goto out; 2965 2966 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2967 break; 2968 } 2969 case KVM_GET_DIRTY_LOG: { 2970 struct kvm_dirty_log log; 2971 2972 r = -EFAULT; 2973 if (copy_from_user(&log, argp, sizeof(log))) 2974 goto out; 2975 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2976 break; 2977 } 2978 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2979 case 
KVM_REGISTER_COALESCED_MMIO: { 2980 struct kvm_coalesced_mmio_zone zone; 2981 2982 r = -EFAULT; 2983 if (copy_from_user(&zone, argp, sizeof(zone))) 2984 goto out; 2985 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2986 break; 2987 } 2988 case KVM_UNREGISTER_COALESCED_MMIO: { 2989 struct kvm_coalesced_mmio_zone zone; 2990 2991 r = -EFAULT; 2992 if (copy_from_user(&zone, argp, sizeof(zone))) 2993 goto out; 2994 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2995 break; 2996 } 2997 #endif 2998 case KVM_IRQFD: { 2999 struct kvm_irqfd data; 3000 3001 r = -EFAULT; 3002 if (copy_from_user(&data, argp, sizeof(data))) 3003 goto out; 3004 r = kvm_irqfd(kvm, &data); 3005 break; 3006 } 3007 case KVM_IOEVENTFD: { 3008 struct kvm_ioeventfd data; 3009 3010 r = -EFAULT; 3011 if (copy_from_user(&data, argp, sizeof(data))) 3012 goto out; 3013 r = kvm_ioeventfd(kvm, &data); 3014 break; 3015 } 3016 #ifdef CONFIG_HAVE_KVM_MSI 3017 case KVM_SIGNAL_MSI: { 3018 struct kvm_msi msi; 3019 3020 r = -EFAULT; 3021 if (copy_from_user(&msi, argp, sizeof(msi))) 3022 goto out; 3023 r = kvm_send_userspace_msi(kvm, &msi); 3024 break; 3025 } 3026 #endif 3027 #ifdef __KVM_HAVE_IRQ_LINE 3028 case KVM_IRQ_LINE_STATUS: 3029 case KVM_IRQ_LINE: { 3030 struct kvm_irq_level irq_event; 3031 3032 r = -EFAULT; 3033 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 3034 goto out; 3035 3036 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 3037 ioctl == KVM_IRQ_LINE_STATUS); 3038 if (r) 3039 goto out; 3040 3041 r = -EFAULT; 3042 if (ioctl == KVM_IRQ_LINE_STATUS) { 3043 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 3044 goto out; 3045 } 3046 3047 r = 0; 3048 break; 3049 } 3050 #endif 3051 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 3052 case KVM_SET_GSI_ROUTING: { 3053 struct kvm_irq_routing routing; 3054 struct kvm_irq_routing __user *urouting; 3055 struct kvm_irq_routing_entry *entries = NULL; 3056 3057 r = -EFAULT; 3058 if (copy_from_user(&routing, argp, sizeof(routing))) 3059 goto out; 3060 r = -EINVAL; 3061 if (routing.nr > KVM_MAX_IRQ_ROUTES) 3062 goto out; 3063 if (routing.flags) 3064 goto out; 3065 if (routing.nr) { 3066 r = -ENOMEM; 3067 entries = vmalloc(routing.nr * sizeof(*entries)); 3068 if (!entries) 3069 goto out; 3070 r = -EFAULT; 3071 urouting = argp; 3072 if (copy_from_user(entries, urouting->entries, 3073 routing.nr * sizeof(*entries))) 3074 goto out_free_irq_routing; 3075 } 3076 r = kvm_set_irq_routing(kvm, entries, routing.nr, 3077 routing.flags); 3078 out_free_irq_routing: 3079 vfree(entries); 3080 break; 3081 } 3082 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 3083 case KVM_CREATE_DEVICE: { 3084 struct kvm_create_device cd; 3085 3086 r = -EFAULT; 3087 if (copy_from_user(&cd, argp, sizeof(cd))) 3088 goto out; 3089 3090 r = kvm_ioctl_create_device(kvm, &cd); 3091 if (r) 3092 goto out; 3093 3094 r = -EFAULT; 3095 if (copy_to_user(argp, &cd, sizeof(cd))) 3096 goto out; 3097 3098 r = 0; 3099 break; 3100 } 3101 case KVM_CHECK_EXTENSION: 3102 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 3103 break; 3104 default: 3105 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 3106 } 3107 out: 3108 return r; 3109 } 3110 3111 #ifdef CONFIG_KVM_COMPAT 3112 struct compat_kvm_dirty_log { 3113 __u32 slot; 3114 __u32 padding1; 3115 union { 3116 compat_uptr_t dirty_bitmap; /* one bit per page */ 3117 __u64 padding2; 3118 }; 3119 }; 3120 3121 static long kvm_vm_compat_ioctl(struct file *filp, 3122 unsigned int ioctl, unsigned long arg) 3123 { 3124 struct kvm *kvm = filp->private_data; 3125 int r; 3126 3127 if (kvm->mm != 
current->mm) 3128 return -EIO; 3129 switch (ioctl) { 3130 case KVM_GET_DIRTY_LOG: { 3131 struct compat_kvm_dirty_log compat_log; 3132 struct kvm_dirty_log log; 3133 3134 r = -EFAULT; 3135 if (copy_from_user(&compat_log, (void __user *)arg, 3136 sizeof(compat_log))) 3137 goto out; 3138 log.slot = compat_log.slot; 3139 log.padding1 = compat_log.padding1; 3140 log.padding2 = compat_log.padding2; 3141 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 3142 3143 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 3144 break; 3145 } 3146 default: 3147 r = kvm_vm_ioctl(filp, ioctl, arg); 3148 } 3149 3150 out: 3151 return r; 3152 } 3153 #endif 3154 3155 static struct file_operations kvm_vm_fops = { 3156 .release = kvm_vm_release, 3157 .unlocked_ioctl = kvm_vm_ioctl, 3158 #ifdef CONFIG_KVM_COMPAT 3159 .compat_ioctl = kvm_vm_compat_ioctl, 3160 #endif 3161 .llseek = noop_llseek, 3162 }; 3163 3164 static int kvm_dev_ioctl_create_vm(unsigned long type) 3165 { 3166 int r; 3167 struct kvm *kvm; 3168 struct file *file; 3169 3170 kvm = kvm_create_vm(type); 3171 if (IS_ERR(kvm)) 3172 return PTR_ERR(kvm); 3173 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 3174 r = kvm_coalesced_mmio_init(kvm); 3175 if (r < 0) { 3176 kvm_put_kvm(kvm); 3177 return r; 3178 } 3179 #endif 3180 r = get_unused_fd_flags(O_CLOEXEC); 3181 if (r < 0) { 3182 kvm_put_kvm(kvm); 3183 return r; 3184 } 3185 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 3186 if (IS_ERR(file)) { 3187 put_unused_fd(r); 3188 kvm_put_kvm(kvm); 3189 return PTR_ERR(file); 3190 } 3191 3192 if (kvm_create_vm_debugfs(kvm, r) < 0) { 3193 put_unused_fd(r); 3194 fput(file); 3195 return -ENOMEM; 3196 } 3197 3198 fd_install(r, file); 3199 return r; 3200 } 3201 3202 static long kvm_dev_ioctl(struct file *filp, 3203 unsigned int ioctl, unsigned long arg) 3204 { 3205 long r = -EINVAL; 3206 3207 switch (ioctl) { 3208 case KVM_GET_API_VERSION: 3209 if (arg) 3210 goto out; 3211 r = KVM_API_VERSION; 3212 break; 3213 case KVM_CREATE_VM: 3214 r = kvm_dev_ioctl_create_vm(arg); 3215 break; 3216 case KVM_CHECK_EXTENSION: 3217 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 3218 break; 3219 case KVM_GET_VCPU_MMAP_SIZE: 3220 if (arg) 3221 goto out; 3222 r = PAGE_SIZE; /* struct kvm_run */ 3223 #ifdef CONFIG_X86 3224 r += PAGE_SIZE; /* pio data page */ 3225 #endif 3226 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 3227 r += PAGE_SIZE; /* coalesced mmio ring page */ 3228 #endif 3229 break; 3230 case KVM_TRACE_ENABLE: 3231 case KVM_TRACE_PAUSE: 3232 case KVM_TRACE_DISABLE: 3233 r = -EOPNOTSUPP; 3234 break; 3235 default: 3236 return kvm_arch_dev_ioctl(filp, ioctl, arg); 3237 } 3238 out: 3239 return r; 3240 } 3241 3242 static struct file_operations kvm_chardev_ops = { 3243 .unlocked_ioctl = kvm_dev_ioctl, 3244 .compat_ioctl = kvm_dev_ioctl, 3245 .llseek = noop_llseek, 3246 }; 3247 3248 static struct miscdevice kvm_dev = { 3249 KVM_MINOR, 3250 "kvm", 3251 &kvm_chardev_ops, 3252 }; 3253 3254 static void hardware_enable_nolock(void *junk) 3255 { 3256 int cpu = raw_smp_processor_id(); 3257 int r; 3258 3259 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3260 return; 3261 3262 cpumask_set_cpu(cpu, cpus_hardware_enabled); 3263 3264 r = kvm_arch_hardware_enable(); 3265 3266 if (r) { 3267 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3268 atomic_inc(&hardware_enable_failed); 3269 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 3270 } 3271 } 3272 3273 static int kvm_starting_cpu(unsigned int cpu) 3274 { 3275 raw_spin_lock(&kvm_count_lock); 3276 if (kvm_usage_count) 3277 
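/*
 * Editorial note: a CPU coming online while VMs already exist must
 * re-enable virtualization support locally; kvm_usage_count is stable
 * here because kvm_count_lock is held.
 */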
hardware_enable_nolock(NULL); 3278 raw_spin_unlock(&kvm_count_lock); 3279 return 0; 3280 } 3281 3282 static void hardware_disable_nolock(void *junk) 3283 { 3284 int cpu = raw_smp_processor_id(); 3285 3286 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3287 return; 3288 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3289 kvm_arch_hardware_disable(); 3290 } 3291 3292 static int kvm_dying_cpu(unsigned int cpu) 3293 { 3294 raw_spin_lock(&kvm_count_lock); 3295 if (kvm_usage_count) 3296 hardware_disable_nolock(NULL); 3297 raw_spin_unlock(&kvm_count_lock); 3298 return 0; 3299 } 3300 3301 static void hardware_disable_all_nolock(void) 3302 { 3303 BUG_ON(!kvm_usage_count); 3304 3305 kvm_usage_count--; 3306 if (!kvm_usage_count) 3307 on_each_cpu(hardware_disable_nolock, NULL, 1); 3308 } 3309 3310 static void hardware_disable_all(void) 3311 { 3312 raw_spin_lock(&kvm_count_lock); 3313 hardware_disable_all_nolock(); 3314 raw_spin_unlock(&kvm_count_lock); 3315 } 3316 3317 static int hardware_enable_all(void) 3318 { 3319 int r = 0; 3320 3321 raw_spin_lock(&kvm_count_lock); 3322 3323 kvm_usage_count++; 3324 if (kvm_usage_count == 1) { 3325 atomic_set(&hardware_enable_failed, 0); 3326 on_each_cpu(hardware_enable_nolock, NULL, 1); 3327 3328 if (atomic_read(&hardware_enable_failed)) { 3329 hardware_disable_all_nolock(); 3330 r = -EBUSY; 3331 } 3332 } 3333 3334 raw_spin_unlock(&kvm_count_lock); 3335 3336 return r; 3337 } 3338 3339 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 3340 void *v) 3341 { 3342 /* 3343 * Some (well, at least mine) BIOSes hang on reboot if the CPU is 3344 * still in VMX root mode. 3345 * 3346 * Intel TXT also requires VMX to be off on all CPUs at shutdown. 3347 */ 3348 pr_info("kvm: exiting hardware virtualization\n"); 3349 kvm_rebooting = true; 3350 on_each_cpu(hardware_disable_nolock, NULL, 1); 3351 return NOTIFY_OK; 3352 } 3353 3354 static struct notifier_block kvm_reboot_notifier = { 3355 .notifier_call = kvm_reboot, 3356 .priority = 0, 3357 }; 3358 3359 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 3360 { 3361 int i; 3362 3363 for (i = 0; i < bus->dev_count; i++) { 3364 struct kvm_io_device *pos = bus->range[i].dev; 3365 3366 kvm_iodevice_destructor(pos); 3367 } 3368 kfree(bus); 3369 } 3370 3371 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 3372 const struct kvm_io_range *r2) 3373 { 3374 gpa_t addr1 = r1->addr; 3375 gpa_t addr2 = r2->addr; 3376 3377 if (addr1 < addr2) 3378 return -1; 3379 3380 /* If r2->len == 0, match the exact address. If r2->len != 0, 3381 * accept any overlapping write. Any order is acceptable for 3382 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 3383 * we process all of them.
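 *
 * Worked example (editorial): against a registered range {.addr = 0x100,
 * .len = 4}, a key {.addr = 0x102, .len = 2} compares equal (the access
 * overlaps the device), as does {.addr = 0x102, .len = 0}; a key
 * {.addr = 0x104, .len = 2} compares greater, since it starts just past
 * the end of the range.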
3384 */ 3385 if (r2->len) { 3386 addr1 += r1->len; 3387 addr2 += r2->len; 3388 } 3389 3390 if (addr1 > addr2) 3391 return 1; 3392 3393 return 0; 3394 } 3395 3396 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 3397 { 3398 return kvm_io_bus_cmp(p1, p2); 3399 } 3400 3401 static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 3402 gpa_t addr, int len) 3403 { 3404 bus->range[bus->dev_count++] = (struct kvm_io_range) { 3405 .addr = addr, 3406 .len = len, 3407 .dev = dev, 3408 }; 3409 3410 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 3411 kvm_io_bus_sort_cmp, NULL); 3412 3413 return 0; 3414 } 3415 3416 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 3417 gpa_t addr, int len) 3418 { 3419 struct kvm_io_range *range, key; 3420 int off; 3421 3422 key = (struct kvm_io_range) { 3423 .addr = addr, 3424 .len = len, 3425 }; 3426 3427 range = bsearch(&key, bus->range, bus->dev_count, 3428 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 3429 if (range == NULL) 3430 return -ENOENT; 3431 3432 off = range - bus->range; 3433 3434 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 3435 off--; 3436 3437 return off; 3438 } 3439 3440 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3441 struct kvm_io_range *range, const void *val) 3442 { 3443 int idx; 3444 3445 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3446 if (idx < 0) 3447 return -EOPNOTSUPP; 3448 3449 while (idx < bus->dev_count && 3450 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3451 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 3452 range->len, val)) 3453 return idx; 3454 idx++; 3455 } 3456 3457 return -EOPNOTSUPP; 3458 } 3459 3460 /* kvm_io_bus_write - called under kvm->slots_lock */ 3461 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3462 int len, const void *val) 3463 { 3464 struct kvm_io_bus *bus; 3465 struct kvm_io_range range; 3466 int r; 3467 3468 range = (struct kvm_io_range) { 3469 .addr = addr, 3470 .len = len, 3471 }; 3472 3473 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3474 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3475 return r < 0 ? r : 0; 3476 } 3477 3478 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3479 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 3480 gpa_t addr, int len, const void *val, long cookie) 3481 { 3482 struct kvm_io_bus *bus; 3483 struct kvm_io_range range; 3484 3485 range = (struct kvm_io_range) { 3486 .addr = addr, 3487 .len = len, 3488 }; 3489 3490 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3491 3492 /* First try the device referenced by cookie. */ 3493 if ((cookie >= 0) && (cookie < bus->dev_count) && 3494 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3495 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 3496 val)) 3497 return cookie; 3498 3499 /* 3500 * cookie contained garbage; fall back to search and return the 3501 * correct cookie value. 
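 *
 * Editorial sketch: a caller that repeatedly hits the same device can
 * feed the previous return value back in, reducing the fast path to a
 * single comparison (KVM_MMIO_BUS is picked here only as an example bus):
 *
 *	cookie = kvm_io_bus_write_cookie(vcpu, KVM_MMIO_BUS, addr, len,
 *					 &val, cookie);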
3502 */ 3503 return __kvm_io_bus_write(vcpu, bus, &range, val); 3504 } 3505 3506 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3507 struct kvm_io_range *range, void *val) 3508 { 3509 int idx; 3510 3511 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3512 if (idx < 0) 3513 return -EOPNOTSUPP; 3514 3515 while (idx < bus->dev_count && 3516 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3517 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 3518 range->len, val)) 3519 return idx; 3520 idx++; 3521 } 3522 3523 return -EOPNOTSUPP; 3524 } 3525 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3526 3527 /* kvm_io_bus_read - called under kvm->slots_lock */ 3528 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3529 int len, void *val) 3530 { 3531 struct kvm_io_bus *bus; 3532 struct kvm_io_range range; 3533 int r; 3534 3535 range = (struct kvm_io_range) { 3536 .addr = addr, 3537 .len = len, 3538 }; 3539 3540 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3541 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3542 return r < 0 ? r : 0; 3543 } 3544 3545 3546 /* Caller must hold slots_lock. */ 3547 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3548 int len, struct kvm_io_device *dev) 3549 { 3550 struct kvm_io_bus *new_bus, *bus; 3551 3552 bus = kvm->buses[bus_idx]; 3553 /* exclude ioeventfd which is limited by maximum fd */ 3554 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3555 return -ENOSPC; 3556 3557 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3558 sizeof(struct kvm_io_range)), GFP_KERNEL); 3559 if (!new_bus) 3560 return -ENOMEM; 3561 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3562 sizeof(struct kvm_io_range))); 3563 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3564 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3565 synchronize_srcu_expedited(&kvm->srcu); 3566 kfree(bus); 3567 3568 return 0; 3569 } 3570 3571 /* Caller must hold slots_lock. 
*/ 3572 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3573 struct kvm_io_device *dev) 3574 { 3575 int i, r; 3576 struct kvm_io_bus *new_bus, *bus; 3577 3578 bus = kvm->buses[bus_idx]; 3579 r = -ENOENT; 3580 for (i = 0; i < bus->dev_count; i++) 3581 if (bus->range[i].dev == dev) { 3582 r = 0; 3583 break; 3584 } 3585 3586 if (r) 3587 return r; 3588 3589 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3590 sizeof(struct kvm_io_range)), GFP_KERNEL); 3591 if (!new_bus) 3592 return -ENOMEM; 3593 3594 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3595 new_bus->dev_count--; 3596 memcpy(new_bus->range + i, bus->range + i + 1, 3597 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3598 3599 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3600 synchronize_srcu_expedited(&kvm->srcu); 3601 kfree(bus); 3602 return r; 3603 } 3604 3605 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3606 gpa_t addr) 3607 { 3608 struct kvm_io_bus *bus; 3609 int dev_idx, srcu_idx; 3610 struct kvm_io_device *iodev = NULL; 3611 3612 srcu_idx = srcu_read_lock(&kvm->srcu); 3613 3614 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3615 3616 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3617 if (dev_idx < 0) 3618 goto out_unlock; 3619 3620 iodev = bus->range[dev_idx].dev; 3621 3622 out_unlock: 3623 srcu_read_unlock(&kvm->srcu, srcu_idx); 3624 3625 return iodev; 3626 } 3627 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 3628 3629 static int kvm_debugfs_open(struct inode *inode, struct file *file, 3630 int (*get)(void *, u64 *), int (*set)(void *, u64), 3631 const char *fmt) 3632 { 3633 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 3634 inode->i_private; 3635 3636 /* The debugfs files are a reference to the kvm struct which 3637 * is still valid when kvm_destroy_vm is called. 3638 * To avoid the race between open and the removal of the debugfs 3639 * directory we test against the users count. 
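 *
 * Editorial note: atomic_add_unless(&kvm->users_count, 1, 0) takes a
 * reference only while the count is still non-zero, so an open() racing
 * with VM teardown fails with -ENOENT rather than resurrecting a dying
 * struct kvm.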
3640 */ 3641 if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0)) 3642 return -ENOENT; 3643 3644 if (simple_attr_open(inode, file, get, set, fmt)) { 3645 kvm_put_kvm(stat_data->kvm); 3646 return -ENOMEM; 3647 } 3648 3649 return 0; 3650 } 3651 3652 static int kvm_debugfs_release(struct inode *inode, struct file *file) 3653 { 3654 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 3655 inode->i_private; 3656 3657 simple_attr_release(inode, file); 3658 kvm_put_kvm(stat_data->kvm); 3659 3660 return 0; 3661 } 3662 3663 static int vm_stat_get_per_vm(void *data, u64 *val) 3664 { 3665 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3666 3667 *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset); 3668 3669 return 0; 3670 } 3671 3672 static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file) 3673 { 3674 __simple_attr_check_format("%llu\n", 0ull); 3675 return kvm_debugfs_open(inode, file, vm_stat_get_per_vm, 3676 NULL, "%llu\n"); 3677 } 3678 3679 static const struct file_operations vm_stat_get_per_vm_fops = { 3680 .owner = THIS_MODULE, 3681 .open = vm_stat_get_per_vm_open, 3682 .release = kvm_debugfs_release, 3683 .read = simple_attr_read, 3684 .write = simple_attr_write, 3685 .llseek = generic_file_llseek, 3686 }; 3687 3688 static int vcpu_stat_get_per_vm(void *data, u64 *val) 3689 { 3690 int i; 3691 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 3692 struct kvm_vcpu *vcpu; 3693 3694 *val = 0; 3695 3696 kvm_for_each_vcpu(i, vcpu, stat_data->kvm) 3697 *val += *(u64 *)((void *)vcpu + stat_data->offset); 3698 3699 return 0; 3700 } 3701 3702 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file) 3703 { 3704 __simple_attr_check_format("%llu\n", 0ull); 3705 return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm, 3706 NULL, "%llu\n"); 3707 } 3708 3709 static const struct file_operations vcpu_stat_get_per_vm_fops = { 3710 .owner = THIS_MODULE, 3711 .open = vcpu_stat_get_per_vm_open, 3712 .release = kvm_debugfs_release, 3713 .read = simple_attr_read, 3714 .write = simple_attr_write, 3715 .llseek = generic_file_llseek, 3716 }; 3717 3718 static const struct file_operations *stat_fops_per_vm[] = { 3719 [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops, 3720 [KVM_STAT_VM] = &vm_stat_get_per_vm_fops, 3721 }; 3722 3723 static int vm_stat_get(void *_offset, u64 *val) 3724 { 3725 unsigned offset = (long)_offset; 3726 struct kvm *kvm; 3727 struct kvm_stat_data stat_tmp = {.offset = offset}; 3728 u64 tmp_val; 3729 3730 *val = 0; 3731 spin_lock(&kvm_lock); 3732 list_for_each_entry(kvm, &vm_list, vm_list) { 3733 stat_tmp.kvm = kvm; 3734 vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val); 3735 *val += tmp_val; 3736 } 3737 spin_unlock(&kvm_lock); 3738 return 0; 3739 } 3740 3741 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3742 3743 static int vcpu_stat_get(void *_offset, u64 *val) 3744 { 3745 unsigned offset = (long)_offset; 3746 struct kvm *kvm; 3747 struct kvm_stat_data stat_tmp = {.offset = offset}; 3748 u64 tmp_val; 3749 3750 *val = 0; 3751 spin_lock(&kvm_lock); 3752 list_for_each_entry(kvm, &vm_list, vm_list) { 3753 stat_tmp.kvm = kvm; 3754 vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val); 3755 *val += tmp_val; 3756 } 3757 spin_unlock(&kvm_lock); 3758 return 0; 3759 } 3760 3761 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3762 3763 static const struct file_operations *stat_fops[] = { 3764 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3765 [KVM_STAT_VM] = &vm_stat_fops, 3766 }; 3767 3768 
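/*
 * Editorial sketch (paths illustrative; they assume debugfs is mounted at
 * /sys/kernel/debug and that the named stat exists on this architecture):
 * the aggregate attributes above appear directly under the kvm debugfs
 * directory, while the per-VM variants live in a "<pid>-<vm fd>"
 * subdirectory created by kvm_create_vm_debugfs():
 *
 *	cat /sys/kernel/debug/kvm/halt_wakeup
 *	cat /sys/kernel/debug/kvm/1234-10/halt_wakeup
 *
 * Both views are generated from the same debugfs_entries table.
 */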
static int kvm_init_debug(void) 3769 { 3770 int r = -EEXIST; 3771 struct kvm_stats_debugfs_item *p; 3772 3773 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3774 if (kvm_debugfs_dir == NULL) 3775 goto out; 3776 3777 kvm_debugfs_num_entries = 0; 3778 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { 3779 if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3780 (void *)(long)p->offset, 3781 stat_fops[p->kind])) 3782 goto out_dir; 3783 } 3784 3785 return 0; 3786 3787 out_dir: 3788 debugfs_remove_recursive(kvm_debugfs_dir); 3789 out: 3790 return r; 3791 } 3792 3793 static int kvm_suspend(void) 3794 { 3795 if (kvm_usage_count) 3796 hardware_disable_nolock(NULL); 3797 return 0; 3798 } 3799 3800 static void kvm_resume(void) 3801 { 3802 if (kvm_usage_count) { 3803 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3804 hardware_enable_nolock(NULL); 3805 } 3806 } 3807 3808 static struct syscore_ops kvm_syscore_ops = { 3809 .suspend = kvm_suspend, 3810 .resume = kvm_resume, 3811 }; 3812 3813 static inline 3814 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3815 { 3816 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3817 } 3818 3819 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3820 { 3821 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3822 3823 if (vcpu->preempted) 3824 vcpu->preempted = false; 3825 3826 kvm_arch_sched_in(vcpu, cpu); 3827 3828 kvm_arch_vcpu_load(vcpu, cpu); 3829 } 3830 3831 static void kvm_sched_out(struct preempt_notifier *pn, 3832 struct task_struct *next) 3833 { 3834 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3835 3836 if (current->state == TASK_RUNNING) 3837 vcpu->preempted = true; 3838 kvm_arch_vcpu_put(vcpu); 3839 } 3840 3841 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3842 struct module *module) 3843 { 3844 int r; 3845 int cpu; 3846 3847 r = kvm_arch_init(opaque); 3848 if (r) 3849 goto out_fail; 3850 3851 /* 3852 * kvm_arch_init makes sure there's at most one caller 3853 * for architectures that support multiple implementations, 3854 * like intel and amd on x86. 3855 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3856 * conflicts in case kvm is already setup for another implementation. 3857 */ 3858 r = kvm_irqfd_init(); 3859 if (r) 3860 goto out_irqfd; 3861 3862 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3863 r = -ENOMEM; 3864 goto out_free_0; 3865 } 3866 3867 r = kvm_arch_hardware_setup(); 3868 if (r < 0) 3869 goto out_free_0a; 3870 3871 for_each_online_cpu(cpu) { 3872 smp_call_function_single(cpu, 3873 kvm_arch_check_processor_compat, 3874 &r, 1); 3875 if (r < 0) 3876 goto out_free_1; 3877 } 3878 3879 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING", 3880 kvm_starting_cpu, kvm_dying_cpu); 3881 if (r) 3882 goto out_free_2; 3883 register_reboot_notifier(&kvm_reboot_notifier); 3884 3885 /* A kmem cache lets us meet the alignment requirements of fx_save. 
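 *
 * Editorial note: e.g. x86's VMX passes __alignof__(struct vcpu_vmx) as
 * vcpu_align; an architecture passing 0 gets the default alignment of
 * struct kvm_vcpu via the fallback just below.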
*/ 3886 if (!vcpu_align) 3887 vcpu_align = __alignof__(struct kvm_vcpu); 3888 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3889 0, NULL); 3890 if (!kvm_vcpu_cache) { 3891 r = -ENOMEM; 3892 goto out_free_3; 3893 } 3894 3895 r = kvm_async_pf_init(); 3896 if (r) 3897 goto out_free; 3898 3899 kvm_chardev_ops.owner = module; 3900 kvm_vm_fops.owner = module; 3901 kvm_vcpu_fops.owner = module; 3902 3903 r = misc_register(&kvm_dev); 3904 if (r) { 3905 pr_err("kvm: misc device register failed\n"); 3906 goto out_unreg; 3907 } 3908 3909 register_syscore_ops(&kvm_syscore_ops); 3910 3911 kvm_preempt_ops.sched_in = kvm_sched_in; 3912 kvm_preempt_ops.sched_out = kvm_sched_out; 3913 3914 r = kvm_init_debug(); 3915 if (r) { 3916 pr_err("kvm: create debugfs files failed\n"); 3917 goto out_undebugfs; 3918 } 3919 3920 r = kvm_vfio_ops_init(); 3921 WARN_ON(r); 3922 3923 return 0; 3924 3925 out_undebugfs: 3926 unregister_syscore_ops(&kvm_syscore_ops); 3927 misc_deregister(&kvm_dev); 3928 out_unreg: 3929 kvm_async_pf_deinit(); 3930 out_free: 3931 kmem_cache_destroy(kvm_vcpu_cache); 3932 out_free_3: 3933 unregister_reboot_notifier(&kvm_reboot_notifier); 3934 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 3935 out_free_2: 3936 out_free_1: 3937 kvm_arch_hardware_unsetup(); 3938 out_free_0a: 3939 free_cpumask_var(cpus_hardware_enabled); 3940 out_free_0: 3941 kvm_irqfd_exit(); 3942 out_irqfd: 3943 kvm_arch_exit(); 3944 out_fail: 3945 return r; 3946 } 3947 EXPORT_SYMBOL_GPL(kvm_init); 3948 3949 void kvm_exit(void) 3950 { 3951 debugfs_remove_recursive(kvm_debugfs_dir); 3952 misc_deregister(&kvm_dev); 3953 kmem_cache_destroy(kvm_vcpu_cache); 3954 kvm_async_pf_deinit(); 3955 unregister_syscore_ops(&kvm_syscore_ops); 3956 unregister_reboot_notifier(&kvm_reboot_notifier); 3957 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 3958 on_each_cpu(hardware_disable_nolock, NULL, 1); 3959 kvm_arch_hardware_unsetup(); 3960 kvm_arch_exit(); 3961 kvm_irqfd_exit(); 3962 free_cpumask_var(cpus_hardware_enabled); 3963 kvm_vfio_ops_exit(); 3964 } 3965 EXPORT_SYMBOL_GPL(kvm_exit); 3966
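/*
 * Editorial sketch (illustrative only; the "vendor" names are
 * placeholders): an architecture module wires the two entry points above
 * into its own module init/exit hooks roughly as follows:
 *
 *	static int __init vendor_init(void)
 *	{
 *		return kvm_init(&vendor_ops, sizeof(struct vendor_vcpu),
 *				__alignof__(struct vendor_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit vendor_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(vendor_init);
 *	module_exit(vendor_exit);
 */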