1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <kvm/iodev.h> 17 18 #include <linux/kvm_host.h> 19 #include <linux/kvm.h> 20 #include <linux/module.h> 21 #include <linux/errno.h> 22 #include <linux/percpu.h> 23 #include <linux/mm.h> 24 #include <linux/miscdevice.h> 25 #include <linux/vmalloc.h> 26 #include <linux/reboot.h> 27 #include <linux/debugfs.h> 28 #include <linux/highmem.h> 29 #include <linux/file.h> 30 #include <linux/syscore_ops.h> 31 #include <linux/cpu.h> 32 #include <linux/sched/signal.h> 33 #include <linux/sched/mm.h> 34 #include <linux/sched/stat.h> 35 #include <linux/cpumask.h> 36 #include <linux/smp.h> 37 #include <linux/anon_inodes.h> 38 #include <linux/profile.h> 39 #include <linux/kvm_para.h> 40 #include <linux/pagemap.h> 41 #include <linux/mman.h> 42 #include <linux/swap.h> 43 #include <linux/bitops.h> 44 #include <linux/spinlock.h> 45 #include <linux/compat.h> 46 #include <linux/srcu.h> 47 #include <linux/hugetlb.h> 48 #include <linux/slab.h> 49 #include <linux/sort.h> 50 #include <linux/bsearch.h> 51 #include <linux/io.h> 52 #include <linux/lockdep.h> 53 #include <linux/kthread.h> 54 #include <linux/suspend.h> 55 56 #include <asm/processor.h> 57 #include <asm/ioctl.h> 58 #include <linux/uaccess.h> 59 60 #include "coalesced_mmio.h" 61 #include "async_pf.h" 62 #include "kvm_mm.h" 63 #include "vfio.h" 64 65 #define CREATE_TRACE_POINTS 66 #include <trace/events/kvm.h> 67 68 #include <linux/kvm_dirty_ring.h> 69 70 /* Worst case buffer size needed for holding an integer. */ 71 #define ITOA_MAX_LEN 12 72 73 MODULE_AUTHOR("Qumranet"); 74 MODULE_LICENSE("GPL"); 75 76 /* Architectures should define their poll value according to the halt latency */ 77 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 78 module_param(halt_poll_ns, uint, 0644); 79 EXPORT_SYMBOL_GPL(halt_poll_ns); 80 81 /* Default doubles per-vcpu halt_poll_ns. */ 82 unsigned int halt_poll_ns_grow = 2; 83 module_param(halt_poll_ns_grow, uint, 0644); 84 EXPORT_SYMBOL_GPL(halt_poll_ns_grow); 85 86 /* The start value to grow halt_poll_ns from */ 87 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */ 88 module_param(halt_poll_ns_grow_start, uint, 0644); 89 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start); 90 91 /* Default resets per-vcpu halt_poll_ns . 
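 * (A shrink value of 0, the default, resets halt_poll_ns to zero rather than
 *  dividing it; a non-zero value divides halt_poll_ns by that factor, see
 *  shrink_halt_poll_ns() further down in this file.)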
*/ 92 unsigned int halt_poll_ns_shrink; 93 module_param(halt_poll_ns_shrink, uint, 0644); 94 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink); 95 96 /* 97 * Ordering of locks: 98 * 99 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 100 */ 101 102 DEFINE_MUTEX(kvm_lock); 103 static DEFINE_RAW_SPINLOCK(kvm_count_lock); 104 LIST_HEAD(vm_list); 105 106 static cpumask_var_t cpus_hardware_enabled; 107 static int kvm_usage_count; 108 static atomic_t hardware_enable_failed; 109 110 static struct kmem_cache *kvm_vcpu_cache; 111 112 static __read_mostly struct preempt_ops kvm_preempt_ops; 113 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); 114 115 struct dentry *kvm_debugfs_dir; 116 EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 117 118 static const struct file_operations stat_fops_per_vm; 119 120 static struct file_operations kvm_chardev_ops; 121 122 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 123 unsigned long arg); 124 #ifdef CONFIG_KVM_COMPAT 125 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 126 unsigned long arg); 127 #define KVM_COMPAT(c) .compat_ioctl = (c) 128 #else 129 /* 130 * For architectures that don't implement a compat infrastructure, 131 * adopt a double line of defense: 132 * - Prevent a compat task from opening /dev/kvm 133 * - If the open has been done by a 64bit task, and the KVM fd 134 * passed to a compat task, let the ioctls fail. 135 */ 136 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, 137 unsigned long arg) { return -EINVAL; } 138 139 static int kvm_no_compat_open(struct inode *inode, struct file *file) 140 { 141 return is_compat_task() ? -ENODEV : 0; 142 } 143 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \ 144 .open = kvm_no_compat_open 145 #endif 146 static int hardware_enable_all(void); 147 static void hardware_disable_all(void); 148 149 static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 150 151 __visible bool kvm_rebooting; 152 EXPORT_SYMBOL_GPL(kvm_rebooting); 153 154 #define KVM_EVENT_CREATE_VM 0 155 #define KVM_EVENT_DESTROY_VM 1 156 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); 157 static unsigned long long kvm_createvm_count; 158 static unsigned long long kvm_active_vms; 159 160 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); 161 162 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 163 unsigned long start, unsigned long end) 164 { 165 } 166 167 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) 168 { 169 /* 170 * The metadata used by is_zone_device_page() to determine whether or 171 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if 172 * the device has been pinned, e.g. by get_user_pages(). WARN if the 173 * page_count() is zero to help detect bad usage of this helper. 174 */ 175 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn)))) 176 return false; 177 178 return is_zone_device_page(pfn_to_page(pfn)); 179 } 180 181 bool kvm_is_reserved_pfn(kvm_pfn_t pfn) 182 { 183 /* 184 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting 185 * perspective they are "normal" pages, albeit with slightly different 186 * usage rules. 
187 */ 188 if (pfn_valid(pfn)) 189 return PageReserved(pfn_to_page(pfn)) && 190 !is_zero_pfn(pfn) && 191 !kvm_is_zone_device_pfn(pfn); 192 193 return true; 194 } 195 196 /* 197 * Switches to specified vcpu, until a matching vcpu_put() 198 */ 199 void vcpu_load(struct kvm_vcpu *vcpu) 200 { 201 int cpu = get_cpu(); 202 203 __this_cpu_write(kvm_running_vcpu, vcpu); 204 preempt_notifier_register(&vcpu->preempt_notifier); 205 kvm_arch_vcpu_load(vcpu, cpu); 206 put_cpu(); 207 } 208 EXPORT_SYMBOL_GPL(vcpu_load); 209 210 void vcpu_put(struct kvm_vcpu *vcpu) 211 { 212 preempt_disable(); 213 kvm_arch_vcpu_put(vcpu); 214 preempt_notifier_unregister(&vcpu->preempt_notifier); 215 __this_cpu_write(kvm_running_vcpu, NULL); 216 preempt_enable(); 217 } 218 EXPORT_SYMBOL_GPL(vcpu_put); 219 220 /* TODO: merge with kvm_arch_vcpu_should_kick */ 221 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req) 222 { 223 int mode = kvm_vcpu_exiting_guest_mode(vcpu); 224 225 /* 226 * We need to wait for the VCPU to reenable interrupts and get out of 227 * READING_SHADOW_PAGE_TABLES mode. 228 */ 229 if (req & KVM_REQUEST_WAIT) 230 return mode != OUTSIDE_GUEST_MODE; 231 232 /* 233 * Need to kick a running VCPU, but otherwise there is nothing to do. 234 */ 235 return mode == IN_GUEST_MODE; 236 } 237 238 static void ack_flush(void *_completed) 239 { 240 } 241 242 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait) 243 { 244 if (cpumask_empty(cpus)) 245 return false; 246 247 smp_call_function_many(cpus, ack_flush, NULL, wait); 248 return true; 249 } 250 251 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req, 252 struct cpumask *tmp, int current_cpu) 253 { 254 int cpu; 255 256 if (likely(!(req & KVM_REQUEST_NO_ACTION))) 257 __kvm_make_request(req, vcpu); 258 259 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 260 return; 261 262 /* 263 * Note, the vCPU could get migrated to a different pCPU at any point 264 * after kvm_request_needs_ipi(), which could result in sending an IPI 265 * to the previous pCPU. But, that's OK because the purpose of the IPI 266 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is 267 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES 268 * after this point is also OK, as the requirement is only that KVM wait 269 * for vCPUs that were reading SPTEs _before_ any changes were 270 * finalized. See kvm_vcpu_kick() for more details on handling requests. 
271 */ 272 if (kvm_request_needs_ipi(vcpu, req)) { 273 cpu = READ_ONCE(vcpu->cpu); 274 if (cpu != -1 && cpu != current_cpu) 275 __cpumask_set_cpu(cpu, tmp); 276 } 277 } 278 279 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 280 unsigned long *vcpu_bitmap) 281 { 282 struct kvm_vcpu *vcpu; 283 struct cpumask *cpus; 284 int i, me; 285 bool called; 286 287 me = get_cpu(); 288 289 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 290 cpumask_clear(cpus); 291 292 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { 293 vcpu = kvm_get_vcpu(kvm, i); 294 if (!vcpu) 295 continue; 296 kvm_make_vcpu_request(vcpu, req, cpus, me); 297 } 298 299 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 300 put_cpu(); 301 302 return called; 303 } 304 305 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 306 struct kvm_vcpu *except) 307 { 308 struct kvm_vcpu *vcpu; 309 struct cpumask *cpus; 310 unsigned long i; 311 bool called; 312 int me; 313 314 me = get_cpu(); 315 316 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 317 cpumask_clear(cpus); 318 319 kvm_for_each_vcpu(i, vcpu, kvm) { 320 if (vcpu == except) 321 continue; 322 kvm_make_vcpu_request(vcpu, req, cpus, me); 323 } 324 325 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 326 put_cpu(); 327 328 return called; 329 } 330 331 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 332 { 333 return kvm_make_all_cpus_request_except(kvm, req, NULL); 334 } 335 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); 336 337 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 338 void kvm_flush_remote_tlbs(struct kvm *kvm) 339 { 340 ++kvm->stat.generic.remote_tlb_flush_requests; 341 342 /* 343 * We want to publish modifications to the page tables before reading 344 * mode. Pairs with a memory barrier in arch-specific code. 345 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 346 * and smp_mb in walk_shadow_page_lockless_begin/end. 347 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 348 * 349 * There is already an smp_mb__after_atomic() before 350 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 351 * barrier here. 352 */ 353 if (!kvm_arch_flush_remote_tlb(kvm) 354 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 355 ++kvm->stat.generic.remote_tlb_flush; 356 } 357 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 358 #endif 359 360 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 361 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 362 gfp_t gfp_flags) 363 { 364 gfp_flags |= mc->gfp_zero; 365 366 if (mc->kmem_cache) 367 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 368 else 369 return (void *)__get_free_page(gfp_flags); 370 } 371 372 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 373 { 374 void *obj; 375 376 if (mc->nobjs >= min) 377 return 0; 378 while (mc->nobjs < ARRAY_SIZE(mc->objects)) { 379 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT); 380 if (!obj) 381 return mc->nobjs >= min ? 
0 : -ENOMEM; 382 mc->objects[mc->nobjs++] = obj; 383 } 384 return 0; 385 } 386 387 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 388 { 389 return mc->nobjs; 390 } 391 392 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 393 { 394 while (mc->nobjs) { 395 if (mc->kmem_cache) 396 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 397 else 398 free_page((unsigned long)mc->objects[--mc->nobjs]); 399 } 400 } 401 402 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 403 { 404 void *p; 405 406 if (WARN_ON(!mc->nobjs)) 407 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 408 else 409 p = mc->objects[--mc->nobjs]; 410 BUG_ON(!p); 411 return p; 412 } 413 #endif 414 415 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 416 { 417 mutex_init(&vcpu->mutex); 418 vcpu->cpu = -1; 419 vcpu->kvm = kvm; 420 vcpu->vcpu_id = id; 421 vcpu->pid = NULL; 422 #ifndef __KVM_HAVE_ARCH_WQP 423 rcuwait_init(&vcpu->wait); 424 #endif 425 kvm_async_pf_vcpu_init(vcpu); 426 427 kvm_vcpu_set_in_spin_loop(vcpu, false); 428 kvm_vcpu_set_dy_eligible(vcpu, false); 429 vcpu->preempted = false; 430 vcpu->ready = false; 431 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 432 vcpu->last_used_slot = NULL; 433 } 434 435 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 436 { 437 kvm_dirty_ring_free(&vcpu->dirty_ring); 438 kvm_arch_vcpu_destroy(vcpu); 439 440 /* 441 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 442 * the vcpu->pid pointer, and at destruction time all file descriptors 443 * are already gone. 444 */ 445 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 446 447 free_page((unsigned long)vcpu->run); 448 kmem_cache_free(kvm_vcpu_cache, vcpu); 449 } 450 451 void kvm_destroy_vcpus(struct kvm *kvm) 452 { 453 unsigned long i; 454 struct kvm_vcpu *vcpu; 455 456 kvm_for_each_vcpu(i, vcpu, kvm) { 457 kvm_vcpu_destroy(vcpu); 458 xa_erase(&kvm->vcpu_array, i); 459 } 460 461 atomic_set(&kvm->online_vcpus, 0); 462 } 463 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); 464 465 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 466 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 467 { 468 return container_of(mn, struct kvm, mmu_notifier); 469 } 470 471 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, 472 struct mm_struct *mm, 473 unsigned long start, unsigned long end) 474 { 475 struct kvm *kvm = mmu_notifier_to_kvm(mn); 476 int idx; 477 478 idx = srcu_read_lock(&kvm->srcu); 479 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); 480 srcu_read_unlock(&kvm->srcu, idx); 481 } 482 483 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 484 485 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 486 unsigned long end); 487 488 struct kvm_hva_range { 489 unsigned long start; 490 unsigned long end; 491 pte_t pte; 492 hva_handler_t handler; 493 on_lock_fn_t on_lock; 494 bool flush_on_ret; 495 bool may_block; 496 }; 497 498 /* 499 * Use a dedicated stub instead of NULL to indicate that there is no callback 500 * function/handler. The compiler technically can't guarantee that a real 501 * function will have a non-zero address, and so it will generate code to 502 * check for !NULL, whereas comparing against a stub will be elided at compile 503 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 
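 *
 * For example, __kvm_handle_hva_range() below compares range->handler and
 * range->on_lock against the stub via IS_KVM_NULL_FN() rather than checking
 * for NULL.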
504 */ 505 static void kvm_null_fn(void) 506 { 507 508 } 509 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 510 511 /* Iterate over each memslot intersecting [start, last] (inclusive) range */ 512 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ 513 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ 514 node; \ 515 node = interval_tree_iter_next(node, start, last)) \ 516 517 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 518 const struct kvm_hva_range *range) 519 { 520 bool ret = false, locked = false; 521 struct kvm_gfn_range gfn_range; 522 struct kvm_memory_slot *slot; 523 struct kvm_memslots *slots; 524 int i, idx; 525 526 if (WARN_ON_ONCE(range->end <= range->start)) 527 return 0; 528 529 /* A null handler is allowed if and only if on_lock() is provided. */ 530 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && 531 IS_KVM_NULL_FN(range->handler))) 532 return 0; 533 534 idx = srcu_read_lock(&kvm->srcu); 535 536 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 537 struct interval_tree_node *node; 538 539 slots = __kvm_memslots(kvm, i); 540 kvm_for_each_memslot_in_hva_range(node, slots, 541 range->start, range->end - 1) { 542 unsigned long hva_start, hva_end; 543 544 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); 545 hva_start = max(range->start, slot->userspace_addr); 546 hva_end = min(range->end, slot->userspace_addr + 547 (slot->npages << PAGE_SHIFT)); 548 549 /* 550 * To optimize for the likely case where the address 551 * range is covered by zero or one memslots, don't 552 * bother making these conditional (to avoid writes on 553 * the second or later invocation of the handler). 554 */ 555 gfn_range.pte = range->pte; 556 gfn_range.may_block = range->may_block; 557 558 /* 559 * {gfn(page) | page intersects with [hva_start, hva_end)} = 560 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 561 */ 562 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); 563 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); 564 gfn_range.slot = slot; 565 566 if (!locked) { 567 locked = true; 568 KVM_MMU_LOCK(kvm); 569 if (!IS_KVM_NULL_FN(range->on_lock)) 570 range->on_lock(kvm, range->start, range->end); 571 if (IS_KVM_NULL_FN(range->handler)) 572 break; 573 } 574 ret |= range->handler(kvm, &gfn_range); 575 } 576 } 577 578 if (range->flush_on_ret && ret) 579 kvm_flush_remote_tlbs(kvm); 580 581 if (locked) 582 KVM_MMU_UNLOCK(kvm); 583 584 srcu_read_unlock(&kvm->srcu, idx); 585 586 /* The notifiers are averse to booleans. 
 :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							  unsigned long start,
							  unsigned long end,
							  hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_notifier_count is zero, then no in-progress invalidations,
	 * including this one, found a relevant memslot at start(); rechecking
	 * memslots here is unnecessary.  Note, a false positive (count elevated
	 * by a different invalidation) is sub-optimal but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_notifier_count))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
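		 *
		 * For example (illustration only): if an invalidation of
		 * HVA range [0x1000, 0x3000) is still in flight when a second
		 * invalidation of [0x8000, 0xa000) starts, the tracked range
		 * grows to [0x1000, 0xa000) and stays that wide until both
		 * invalidations have completed.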
672 */ 673 kvm->mmu_notifier_range_start = 674 min(kvm->mmu_notifier_range_start, start); 675 kvm->mmu_notifier_range_end = 676 max(kvm->mmu_notifier_range_end, end); 677 } 678 } 679 680 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 681 const struct mmu_notifier_range *range) 682 { 683 struct kvm *kvm = mmu_notifier_to_kvm(mn); 684 const struct kvm_hva_range hva_range = { 685 .start = range->start, 686 .end = range->end, 687 .pte = __pte(0), 688 .handler = kvm_unmap_gfn_range, 689 .on_lock = kvm_inc_notifier_count, 690 .flush_on_ret = true, 691 .may_block = mmu_notifier_range_blockable(range), 692 }; 693 694 trace_kvm_unmap_hva_range(range->start, range->end); 695 696 /* 697 * Prevent memslot modification between range_start() and range_end() 698 * so that conditionally locking provides the same result in both 699 * functions. Without that guarantee, the mmu_notifier_count 700 * adjustments will be imbalanced. 701 * 702 * Pairs with the decrement in range_end(). 703 */ 704 spin_lock(&kvm->mn_invalidate_lock); 705 kvm->mn_active_invalidate_count++; 706 spin_unlock(&kvm->mn_invalidate_lock); 707 708 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, 709 hva_range.may_block); 710 711 __kvm_handle_hva_range(kvm, &hva_range); 712 713 return 0; 714 } 715 716 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, 717 unsigned long end) 718 { 719 /* 720 * This sequence increase will notify the kvm page fault that 721 * the page that is going to be mapped in the spte could have 722 * been freed. 723 */ 724 kvm->mmu_notifier_seq++; 725 smp_wmb(); 726 /* 727 * The above sequence increase must be visible before the 728 * below count decrease, which is ensured by the smp_wmb above 729 * in conjunction with the smp_rmb in mmu_notifier_retry(). 730 */ 731 kvm->mmu_notifier_count--; 732 } 733 734 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 735 const struct mmu_notifier_range *range) 736 { 737 struct kvm *kvm = mmu_notifier_to_kvm(mn); 738 const struct kvm_hva_range hva_range = { 739 .start = range->start, 740 .end = range->end, 741 .pte = __pte(0), 742 .handler = (void *)kvm_null_fn, 743 .on_lock = kvm_dec_notifier_count, 744 .flush_on_ret = false, 745 .may_block = mmu_notifier_range_blockable(range), 746 }; 747 bool wake; 748 749 __kvm_handle_hva_range(kvm, &hva_range); 750 751 /* Pairs with the increment in range_start(). */ 752 spin_lock(&kvm->mn_invalidate_lock); 753 wake = (--kvm->mn_active_invalidate_count == 0); 754 spin_unlock(&kvm->mn_invalidate_lock); 755 756 /* 757 * There can only be one waiter, since the wait happens under 758 * slots_lock. 759 */ 760 if (wake) 761 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); 762 763 BUG_ON(kvm->mmu_notifier_count < 0); 764 } 765 766 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 767 struct mm_struct *mm, 768 unsigned long start, 769 unsigned long end) 770 { 771 trace_kvm_age_hva(start, end); 772 773 return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn); 774 } 775 776 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 777 struct mm_struct *mm, 778 unsigned long start, 779 unsigned long end) 780 { 781 trace_kvm_age_hva(start, end); 782 783 /* 784 * Even though we do not flush TLB, this will still adversely 785 * affect performance on pre-Haswell Intel EPT, where there is 786 * no EPT Access Bit to clear so that we have to tear down EPT 787 * tables instead. 
If we find this unacceptable, we can always 788 * add a parameter to kvm_age_hva so that it effectively doesn't 789 * do anything on clear_young. 790 * 791 * Also note that currently we never issue secondary TLB flushes 792 * from clear_young, leaving this job up to the regular system 793 * cadence. If we find this inaccurate, we might come up with a 794 * more sophisticated heuristic later. 795 */ 796 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); 797 } 798 799 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 800 struct mm_struct *mm, 801 unsigned long address) 802 { 803 trace_kvm_test_age_hva(address); 804 805 return kvm_handle_hva_range_no_flush(mn, address, address + 1, 806 kvm_test_age_gfn); 807 } 808 809 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 810 struct mm_struct *mm) 811 { 812 struct kvm *kvm = mmu_notifier_to_kvm(mn); 813 int idx; 814 815 idx = srcu_read_lock(&kvm->srcu); 816 kvm_arch_flush_shadow_all(kvm); 817 srcu_read_unlock(&kvm->srcu, idx); 818 } 819 820 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 821 .invalidate_range = kvm_mmu_notifier_invalidate_range, 822 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 823 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 824 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 825 .clear_young = kvm_mmu_notifier_clear_young, 826 .test_young = kvm_mmu_notifier_test_young, 827 .change_pte = kvm_mmu_notifier_change_pte, 828 .release = kvm_mmu_notifier_release, 829 }; 830 831 static int kvm_init_mmu_notifier(struct kvm *kvm) 832 { 833 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 834 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 835 } 836 837 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 838 839 static int kvm_init_mmu_notifier(struct kvm *kvm) 840 { 841 return 0; 842 } 843 844 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 845 846 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 847 static int kvm_pm_notifier_call(struct notifier_block *bl, 848 unsigned long state, 849 void *unused) 850 { 851 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); 852 853 return kvm_arch_pm_notifier(kvm, state); 854 } 855 856 static void kvm_init_pm_notifier(struct kvm *kvm) 857 { 858 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; 859 /* Suspend KVM before we suspend ftrace, RCU, etc. 
*/ 860 kvm->pm_notifier.priority = INT_MAX; 861 register_pm_notifier(&kvm->pm_notifier); 862 } 863 864 static void kvm_destroy_pm_notifier(struct kvm *kvm) 865 { 866 unregister_pm_notifier(&kvm->pm_notifier); 867 } 868 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */ 869 static void kvm_init_pm_notifier(struct kvm *kvm) 870 { 871 } 872 873 static void kvm_destroy_pm_notifier(struct kvm *kvm) 874 { 875 } 876 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 877 878 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 879 { 880 if (!memslot->dirty_bitmap) 881 return; 882 883 kvfree(memslot->dirty_bitmap); 884 memslot->dirty_bitmap = NULL; 885 } 886 887 /* This does not remove the slot from struct kvm_memslots data structures */ 888 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 889 { 890 kvm_destroy_dirty_bitmap(slot); 891 892 kvm_arch_free_memslot(kvm, slot); 893 894 kfree(slot); 895 } 896 897 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 898 { 899 struct hlist_node *idnode; 900 struct kvm_memory_slot *memslot; 901 int bkt; 902 903 /* 904 * The same memslot objects live in both active and inactive sets, 905 * arbitrarily free using index '1' so the second invocation of this 906 * function isn't operating over a structure with dangling pointers 907 * (even though this function isn't actually touching them). 908 */ 909 if (!slots->node_idx) 910 return; 911 912 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) 913 kvm_free_memslot(kvm, memslot); 914 } 915 916 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 917 { 918 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 919 case KVM_STATS_TYPE_INSTANT: 920 return 0444; 921 case KVM_STATS_TYPE_CUMULATIVE: 922 case KVM_STATS_TYPE_PEAK: 923 default: 924 return 0644; 925 } 926 } 927 928 929 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 930 { 931 int i; 932 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 933 kvm_vcpu_stats_header.num_desc; 934 935 if (!kvm->debugfs_dentry) 936 return; 937 938 debugfs_remove_recursive(kvm->debugfs_dentry); 939 940 if (kvm->debugfs_stat_data) { 941 for (i = 0; i < kvm_debugfs_num_entries; i++) 942 kfree(kvm->debugfs_stat_data[i]); 943 kfree(kvm->debugfs_stat_data); 944 } 945 } 946 947 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) 948 { 949 static DEFINE_MUTEX(kvm_debugfs_lock); 950 struct dentry *dent; 951 char dir_name[ITOA_MAX_LEN * 2]; 952 struct kvm_stat_data *stat_data; 953 const struct _kvm_stats_desc *pdesc; 954 int i, ret; 955 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 956 kvm_vcpu_stats_header.num_desc; 957 958 if (!debugfs_initialized()) 959 return 0; 960 961 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); 962 mutex_lock(&kvm_debugfs_lock); 963 dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 964 if (dent) { 965 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 966 dput(dent); 967 mutex_unlock(&kvm_debugfs_lock); 968 return 0; 969 } 970 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 971 mutex_unlock(&kvm_debugfs_lock); 972 if (IS_ERR(dent)) 973 return 0; 974 975 kvm->debugfs_dentry = dent; 976 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 977 sizeof(*kvm->debugfs_stat_data), 978 GFP_KERNEL_ACCOUNT); 979 if (!kvm->debugfs_stat_data) 980 return -ENOMEM; 981 982 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 983 pdesc = &kvm_vm_stats_desc[i]; 984 stat_data = kzalloc(sizeof(*stat_data), 
				     GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	ret = kvm_arch_create_vm_debugfs(kvm);
	if (ret) {
		kvm_destroy_vm_debugfs(kvm);
		return ret;
	}

	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs directory has been created.  By then
 * kvm->debugfs_dentry is set up, so arch-specific debugfs entries can be
 * created under it.  Cleanup happens automatically and recursively in
 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
 */
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r = -ENOMEM;
	int i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);

	INIT_LIST_HEAD(&kvm->gpc_list);
	spin_lock_init(&kvm->gpc_lock);

	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space.
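			 * For illustration: with two address spaces, slots
			 * for address space 0 start at generation 0 and
			 * those for address space 1 start at generation 1;
			 * each update then bumps the generation by
			 * KVM_ADDRESS_SPACE_NUM (see
			 * kvm_swap_active_memslots()), so the streams never
			 * collide.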
*/ 1095 slots->generation = i; 1096 } 1097 1098 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); 1099 } 1100 1101 for (i = 0; i < KVM_NR_BUSES; i++) { 1102 rcu_assign_pointer(kvm->buses[i], 1103 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT)); 1104 if (!kvm->buses[i]) 1105 goto out_err_no_arch_destroy_vm; 1106 } 1107 1108 kvm->max_halt_poll_ns = halt_poll_ns; 1109 1110 r = kvm_arch_init_vm(kvm, type); 1111 if (r) 1112 goto out_err_no_arch_destroy_vm; 1113 1114 r = hardware_enable_all(); 1115 if (r) 1116 goto out_err_no_disable; 1117 1118 #ifdef CONFIG_HAVE_KVM_IRQFD 1119 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 1120 #endif 1121 1122 r = kvm_init_mmu_notifier(kvm); 1123 if (r) 1124 goto out_err_no_mmu_notifier; 1125 1126 r = kvm_arch_post_init_vm(kvm); 1127 if (r) 1128 goto out_err; 1129 1130 mutex_lock(&kvm_lock); 1131 list_add(&kvm->vm_list, &vm_list); 1132 mutex_unlock(&kvm_lock); 1133 1134 preempt_notifier_inc(); 1135 kvm_init_pm_notifier(kvm); 1136 1137 /* 1138 * When the fd passed to this ioctl() is opened it pins the module, 1139 * but try_module_get() also prevents getting a reference if the module 1140 * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait"). 1141 */ 1142 if (!try_module_get(kvm_chardev_ops.owner)) { 1143 r = -ENODEV; 1144 goto out_err; 1145 } 1146 1147 return kvm; 1148 1149 out_err: 1150 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1151 if (kvm->mmu_notifier.ops) 1152 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); 1153 #endif 1154 out_err_no_mmu_notifier: 1155 hardware_disable_all(); 1156 out_err_no_disable: 1157 kvm_arch_destroy_vm(kvm); 1158 out_err_no_arch_destroy_vm: 1159 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 1160 for (i = 0; i < KVM_NR_BUSES; i++) 1161 kfree(kvm_get_bus(kvm, i)); 1162 cleanup_srcu_struct(&kvm->irq_srcu); 1163 out_err_no_irq_srcu: 1164 cleanup_srcu_struct(&kvm->srcu); 1165 out_err_no_srcu: 1166 kvm_arch_free_vm(kvm); 1167 mmdrop(current->mm); 1168 return ERR_PTR(r); 1169 } 1170 1171 static void kvm_destroy_devices(struct kvm *kvm) 1172 { 1173 struct kvm_device *dev, *tmp; 1174 1175 /* 1176 * We do not need to take the kvm->lock here, because nobody else 1177 * has a reference to the struct kvm at this point and therefore 1178 * cannot access the devices list anyhow. 1179 */ 1180 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 1181 list_del(&dev->vm_node); 1182 dev->ops->destroy(dev); 1183 } 1184 } 1185 1186 static void kvm_destroy_vm(struct kvm *kvm) 1187 { 1188 int i; 1189 struct mm_struct *mm = kvm->mm; 1190 1191 kvm_destroy_pm_notifier(kvm); 1192 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); 1193 kvm_destroy_vm_debugfs(kvm); 1194 kvm_arch_sync_events(kvm); 1195 mutex_lock(&kvm_lock); 1196 list_del(&kvm->vm_list); 1197 mutex_unlock(&kvm_lock); 1198 kvm_arch_pre_destroy_vm(kvm); 1199 1200 kvm_free_irq_routing(kvm); 1201 for (i = 0; i < KVM_NR_BUSES; i++) { 1202 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 1203 1204 if (bus) 1205 kvm_io_bus_destroy(bus); 1206 kvm->buses[i] = NULL; 1207 } 1208 kvm_coalesced_mmio_free(kvm); 1209 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 1210 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 1211 /* 1212 * At this point, pending calls to invalidate_range_start() 1213 * have completed but no more MMU notifiers will run, so 1214 * mn_active_invalidate_count may remain unbalanced. 
	 * No threads can be waiting in install_new_memslots as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
	module_put(kvm_chardev_ops.owner);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the VM is not being destroyed; this is a safe version of
 * kvm_get_kvm().  Return true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
1312 */ 1313 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, 1314 struct kvm_memory_slot *b) 1315 { 1316 if (WARN_ON_ONCE(!a && !b)) 1317 return 0; 1318 1319 if (!a) 1320 return b->as_id; 1321 if (!b) 1322 return a->as_id; 1323 1324 WARN_ON_ONCE(a->as_id != b->as_id); 1325 return a->as_id; 1326 } 1327 1328 static void kvm_insert_gfn_node(struct kvm_memslots *slots, 1329 struct kvm_memory_slot *slot) 1330 { 1331 struct rb_root *gfn_tree = &slots->gfn_tree; 1332 struct rb_node **node, *parent; 1333 int idx = slots->node_idx; 1334 1335 parent = NULL; 1336 for (node = &gfn_tree->rb_node; *node; ) { 1337 struct kvm_memory_slot *tmp; 1338 1339 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); 1340 parent = *node; 1341 if (slot->base_gfn < tmp->base_gfn) 1342 node = &(*node)->rb_left; 1343 else if (slot->base_gfn > tmp->base_gfn) 1344 node = &(*node)->rb_right; 1345 else 1346 BUG(); 1347 } 1348 1349 rb_link_node(&slot->gfn_node[idx], parent, node); 1350 rb_insert_color(&slot->gfn_node[idx], gfn_tree); 1351 } 1352 1353 static void kvm_erase_gfn_node(struct kvm_memslots *slots, 1354 struct kvm_memory_slot *slot) 1355 { 1356 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); 1357 } 1358 1359 static void kvm_replace_gfn_node(struct kvm_memslots *slots, 1360 struct kvm_memory_slot *old, 1361 struct kvm_memory_slot *new) 1362 { 1363 int idx = slots->node_idx; 1364 1365 WARN_ON_ONCE(old->base_gfn != new->base_gfn); 1366 1367 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], 1368 &slots->gfn_tree); 1369 } 1370 1371 /* 1372 * Replace @old with @new in the inactive memslots. 1373 * 1374 * With NULL @old this simply adds @new. 1375 * With NULL @new this simply removes @old. 1376 * 1377 * If @new is non-NULL its hva_node[slots_idx] range has to be set 1378 * appropriately. 1379 */ 1380 static void kvm_replace_memslot(struct kvm *kvm, 1381 struct kvm_memory_slot *old, 1382 struct kvm_memory_slot *new) 1383 { 1384 int as_id = kvm_memslots_get_as_id(old, new); 1385 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1386 int idx = slots->node_idx; 1387 1388 if (old) { 1389 hash_del(&old->id_node[idx]); 1390 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); 1391 1392 if ((long)old == atomic_long_read(&slots->last_used_slot)) 1393 atomic_long_set(&slots->last_used_slot, (long)new); 1394 1395 if (!new) { 1396 kvm_erase_gfn_node(slots, old); 1397 return; 1398 } 1399 } 1400 1401 /* 1402 * Initialize @new's hva range. Do this even when replacing an @old 1403 * slot, kvm_copy_memslot() deliberately does not touch node data. 1404 */ 1405 new->hva_node[idx].start = new->userspace_addr; 1406 new->hva_node[idx].last = new->userspace_addr + 1407 (new->npages << PAGE_SHIFT) - 1; 1408 1409 /* 1410 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), 1411 * hva_node needs to be swapped with remove+insert even though hva can't 1412 * change when replacing an existing slot. 1413 */ 1414 hash_add(slots->id_hash, &new->id_node[idx], new->id); 1415 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); 1416 1417 /* 1418 * If the memslot gfn is unchanged, rb_replace_node() can be used to 1419 * switch the node in the gfn tree instead of removing the old and 1420 * inserting the new as two separate operations. Replacement is a 1421 * single O(1) operation versus two O(log(n)) operations for 1422 * remove+insert. 
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

	/* Grab the generation from the active memslots. */
	u64 gen = __kvm_memslots(kvm, as_id)->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot.  Must be released before the SRCU
	 * synchronization below in order to avoid deadlock with another
	 * thread acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
}

static int kvm_prepare_memory_region(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int r;

	/*
	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
	 * will be freed on "commit".  If logging is enabled in both old and
	 * new, reuse the existing bitmap.  If logging is enabled only in the
	 * new and KVM isn't using a ring buffer, allocate and initialize a
	 * new bitmap.
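	 *
	 * Note: when userspace has enabled KVM_DIRTY_LOG_INITIALLY_SET, the
	 * freshly allocated bitmap is initialized to all ones (see the
	 * bitmap_set() below), so pages only get write-protected once
	 * userspace clears their bits via KVM_CLEAR_DIRTY_LOG.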
1518 */ 1519 if (change != KVM_MR_DELETE) { 1520 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) 1521 new->dirty_bitmap = NULL; 1522 else if (old && old->dirty_bitmap) 1523 new->dirty_bitmap = old->dirty_bitmap; 1524 else if (!kvm->dirty_ring_size) { 1525 r = kvm_alloc_dirty_bitmap(new); 1526 if (r) 1527 return r; 1528 1529 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 1530 bitmap_set(new->dirty_bitmap, 0, new->npages); 1531 } 1532 } 1533 1534 r = kvm_arch_prepare_memory_region(kvm, old, new, change); 1535 1536 /* Free the bitmap on failure if it was allocated above. */ 1537 if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap) 1538 kvm_destroy_dirty_bitmap(new); 1539 1540 return r; 1541 } 1542 1543 static void kvm_commit_memory_region(struct kvm *kvm, 1544 struct kvm_memory_slot *old, 1545 const struct kvm_memory_slot *new, 1546 enum kvm_mr_change change) 1547 { 1548 /* 1549 * Update the total number of memslot pages before calling the arch 1550 * hook so that architectures can consume the result directly. 1551 */ 1552 if (change == KVM_MR_DELETE) 1553 kvm->nr_memslot_pages -= old->npages; 1554 else if (change == KVM_MR_CREATE) 1555 kvm->nr_memslot_pages += new->npages; 1556 1557 kvm_arch_commit_memory_region(kvm, old, new, change); 1558 1559 switch (change) { 1560 case KVM_MR_CREATE: 1561 /* Nothing more to do. */ 1562 break; 1563 case KVM_MR_DELETE: 1564 /* Free the old memslot and all its metadata. */ 1565 kvm_free_memslot(kvm, old); 1566 break; 1567 case KVM_MR_MOVE: 1568 case KVM_MR_FLAGS_ONLY: 1569 /* 1570 * Free the dirty bitmap as needed; the below check encompasses 1571 * both the flags and whether a ring buffer is being used) 1572 */ 1573 if (old->dirty_bitmap && !new->dirty_bitmap) 1574 kvm_destroy_dirty_bitmap(old); 1575 1576 /* 1577 * The final quirk. Free the detached, old slot, but only its 1578 * memory, not any metadata. Metadata, including arch specific 1579 * data, may be reused by @new. 1580 */ 1581 kfree(old); 1582 break; 1583 default: 1584 BUG(); 1585 } 1586 } 1587 1588 /* 1589 * Activate @new, which must be installed in the inactive slots by the caller, 1590 * by swapping the active slots and then propagating @new to @old once @old is 1591 * unreachable and can be safely modified. 1592 * 1593 * With NULL @old this simply adds @new to @active (while swapping the sets). 1594 * With NULL @new this simply removes @old from @active and frees it 1595 * (while also swapping the sets). 1596 */ 1597 static void kvm_activate_memslot(struct kvm *kvm, 1598 struct kvm_memory_slot *old, 1599 struct kvm_memory_slot *new) 1600 { 1601 int as_id = kvm_memslots_get_as_id(old, new); 1602 1603 kvm_swap_active_memslots(kvm, as_id); 1604 1605 /* Propagate the new memslot to the now inactive memslots. */ 1606 kvm_replace_memslot(kvm, old, new); 1607 } 1608 1609 static void kvm_copy_memslot(struct kvm_memory_slot *dest, 1610 const struct kvm_memory_slot *src) 1611 { 1612 dest->base_gfn = src->base_gfn; 1613 dest->npages = src->npages; 1614 dest->dirty_bitmap = src->dirty_bitmap; 1615 dest->arch = src->arch; 1616 dest->userspace_addr = src->userspace_addr; 1617 dest->flags = src->flags; 1618 dest->id = src->id; 1619 dest->as_id = src->as_id; 1620 } 1621 1622 static void kvm_invalidate_memslot(struct kvm *kvm, 1623 struct kvm_memory_slot *old, 1624 struct kvm_memory_slot *invalid_slot) 1625 { 1626 /* 1627 * Mark the current slot INVALID. 
As with all memslot modifications, 1628 * this must be done on an unreachable slot to avoid modifying the 1629 * current slot in the active tree. 1630 */ 1631 kvm_copy_memslot(invalid_slot, old); 1632 invalid_slot->flags |= KVM_MEMSLOT_INVALID; 1633 kvm_replace_memslot(kvm, old, invalid_slot); 1634 1635 /* 1636 * Activate the slot that is now marked INVALID, but don't propagate 1637 * the slot to the now inactive slots. The slot is either going to be 1638 * deleted or recreated as a new slot. 1639 */ 1640 kvm_swap_active_memslots(kvm, old->as_id); 1641 1642 /* 1643 * From this point no new shadow pages pointing to a deleted, or moved, 1644 * memslot will be created. Validation of sp->gfn happens in: 1645 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 1646 * - kvm_is_visible_gfn (mmu_check_root) 1647 */ 1648 kvm_arch_flush_shadow_memslot(kvm, old); 1649 1650 /* Was released by kvm_swap_active_memslots, reacquire. */ 1651 mutex_lock(&kvm->slots_arch_lock); 1652 1653 /* 1654 * Copy the arch-specific field of the newly-installed slot back to the 1655 * old slot as the arch data could have changed between releasing 1656 * slots_arch_lock in install_new_memslots() and re-acquiring the lock 1657 * above. Writers are required to retrieve memslots *after* acquiring 1658 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. 1659 */ 1660 old->arch = invalid_slot->arch; 1661 } 1662 1663 static void kvm_create_memslot(struct kvm *kvm, 1664 struct kvm_memory_slot *new) 1665 { 1666 /* Add the new memslot to the inactive set and activate. */ 1667 kvm_replace_memslot(kvm, NULL, new); 1668 kvm_activate_memslot(kvm, NULL, new); 1669 } 1670 1671 static void kvm_delete_memslot(struct kvm *kvm, 1672 struct kvm_memory_slot *old, 1673 struct kvm_memory_slot *invalid_slot) 1674 { 1675 /* 1676 * Remove the old memslot (in the inactive memslots) by passing NULL as 1677 * the "new" slot, and for the invalid version in the active slots. 1678 */ 1679 kvm_replace_memslot(kvm, old, NULL); 1680 kvm_activate_memslot(kvm, invalid_slot, NULL); 1681 } 1682 1683 static void kvm_move_memslot(struct kvm *kvm, 1684 struct kvm_memory_slot *old, 1685 struct kvm_memory_slot *new, 1686 struct kvm_memory_slot *invalid_slot) 1687 { 1688 /* 1689 * Replace the old memslot in the inactive slots, and then swap slots 1690 * and replace the current INVALID with the new as well. 1691 */ 1692 kvm_replace_memslot(kvm, old, new); 1693 kvm_activate_memslot(kvm, invalid_slot, new); 1694 } 1695 1696 static void kvm_update_flags_memslot(struct kvm *kvm, 1697 struct kvm_memory_slot *old, 1698 struct kvm_memory_slot *new) 1699 { 1700 /* 1701 * Similar to the MOVE case, but the slot doesn't need to be zapped as 1702 * an intermediate step. Instead, the old memslot is simply replaced 1703 * with a new, updated copy in both memslot sets. 1704 */ 1705 kvm_replace_memslot(kvm, old, new); 1706 kvm_activate_memslot(kvm, old, new); 1707 } 1708 1709 static int kvm_set_memslot(struct kvm *kvm, 1710 struct kvm_memory_slot *old, 1711 struct kvm_memory_slot *new, 1712 enum kvm_mr_change change) 1713 { 1714 struct kvm_memory_slot *invalid_slot; 1715 int r; 1716 1717 /* 1718 * Released in kvm_swap_active_memslots. 1719 * 1720 * Must be held from before the current memslots are copied until 1721 * after the new memslots are installed with rcu_assign_pointer, 1722 * then released before the synchronize srcu in kvm_swap_active_memslots. 
1723 * 1724 * When modifying memslots outside of the slots_lock, must be held 1725 * before reading the pointer to the current memslots until after all 1726 * changes to those memslots are complete. 1727 * 1728 * These rules ensure that installing new memslots does not lose 1729 * changes made to the previous memslots. 1730 */ 1731 mutex_lock(&kvm->slots_arch_lock); 1732 1733 /* 1734 * Invalidate the old slot if it's being deleted or moved. This is 1735 * done prior to actually deleting/moving the memslot to allow vCPUs to 1736 * continue running by ensuring there are no mappings or shadow pages 1737 * for the memslot when it is deleted/moved. Without pre-invalidation 1738 * (and without a lock), a window would exist between effecting the 1739 * delete/move and committing the changes in arch code where KVM or a 1740 * guest could access a non-existent memslot. 1741 * 1742 * Modifications are done on a temporary, unreachable slot. The old 1743 * slot needs to be preserved in case a later step fails and the 1744 * invalidation needs to be reverted. 1745 */ 1746 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1747 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); 1748 if (!invalid_slot) { 1749 mutex_unlock(&kvm->slots_arch_lock); 1750 return -ENOMEM; 1751 } 1752 kvm_invalidate_memslot(kvm, old, invalid_slot); 1753 } 1754 1755 r = kvm_prepare_memory_region(kvm, old, new, change); 1756 if (r) { 1757 /* 1758 * For DELETE/MOVE, revert the above INVALID change. No 1759 * modifications required since the original slot was preserved 1760 * in the inactive slots. Changing the active memslots also 1761 * release slots_arch_lock. 1762 */ 1763 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1764 kvm_activate_memslot(kvm, invalid_slot, old); 1765 kfree(invalid_slot); 1766 } else { 1767 mutex_unlock(&kvm->slots_arch_lock); 1768 } 1769 return r; 1770 } 1771 1772 /* 1773 * For DELETE and MOVE, the working slot is now active as the INVALID 1774 * version of the old slot. MOVE is particularly special as it reuses 1775 * the old slot and returns a copy of the old slot (in working_slot). 1776 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the 1777 * old slot is detached but otherwise preserved. 1778 */ 1779 if (change == KVM_MR_CREATE) 1780 kvm_create_memslot(kvm, new); 1781 else if (change == KVM_MR_DELETE) 1782 kvm_delete_memslot(kvm, old, invalid_slot); 1783 else if (change == KVM_MR_MOVE) 1784 kvm_move_memslot(kvm, old, new, invalid_slot); 1785 else if (change == KVM_MR_FLAGS_ONLY) 1786 kvm_update_flags_memslot(kvm, old, new); 1787 else 1788 BUG(); 1789 1790 /* Free the temporary INVALID slot used for DELETE and MOVE. */ 1791 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1792 kfree(invalid_slot); 1793 1794 /* 1795 * No need to refresh new->arch, changes after dropping slots_arch_lock 1796 * will directly hit the final, active memsot. Architectures are 1797 * responsible for knowing that new->arch may be stale. 1798 */ 1799 kvm_commit_memory_region(kvm, old, new, change); 1800 1801 return 0; 1802 } 1803 1804 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, 1805 gfn_t start, gfn_t end) 1806 { 1807 struct kvm_memslot_iter iter; 1808 1809 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { 1810 if (iter.slot->id != id) 1811 return true; 1812 } 1813 1814 return false; 1815 } 1816 1817 /* 1818 * Allocate some memory and give it an address in the guest physical address 1819 * space. 
1820 * 1821 * Discontiguous memory is allowed, mostly for framebuffers. 1822 * 1823 * Must be called holding kvm->slots_lock for write. 1824 */ 1825 int __kvm_set_memory_region(struct kvm *kvm, 1826 const struct kvm_userspace_memory_region *mem) 1827 { 1828 struct kvm_memory_slot *old, *new; 1829 struct kvm_memslots *slots; 1830 enum kvm_mr_change change; 1831 unsigned long npages; 1832 gfn_t base_gfn; 1833 int as_id, id; 1834 int r; 1835 1836 r = check_memory_region_flags(mem); 1837 if (r) 1838 return r; 1839 1840 as_id = mem->slot >> 16; 1841 id = (u16)mem->slot; 1842 1843 /* General sanity checks */ 1844 if ((mem->memory_size & (PAGE_SIZE - 1)) || 1845 (mem->memory_size != (unsigned long)mem->memory_size)) 1846 return -EINVAL; 1847 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1848 return -EINVAL; 1849 /* We can read the guest memory with __xxx_user() later on. */ 1850 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1851 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1852 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1853 mem->memory_size)) 1854 return -EINVAL; 1855 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1856 return -EINVAL; 1857 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1858 return -EINVAL; 1859 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 1860 return -EINVAL; 1861 1862 slots = __kvm_memslots(kvm, as_id); 1863 1864 /* 1865 * Note, the old memslot (and the pointer itself!) may be invalidated 1866 * and/or destroyed by kvm_set_memslot(). 1867 */ 1868 old = id_to_memslot(slots, id); 1869 1870 if (!mem->memory_size) { 1871 if (!old || !old->npages) 1872 return -EINVAL; 1873 1874 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 1875 return -EIO; 1876 1877 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 1878 } 1879 1880 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 1881 npages = (mem->memory_size >> PAGE_SHIFT); 1882 1883 if (!old || !old->npages) { 1884 change = KVM_MR_CREATE; 1885 1886 /* 1887 * To simplify KVM internals, the total number of pages across 1888 * all memslots must fit in an unsigned long. 1889 */ 1890 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 1891 return -EINVAL; 1892 } else { /* Modify an existing slot. */ 1893 if ((mem->userspace_addr != old->userspace_addr) || 1894 (npages != old->npages) || 1895 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 1896 return -EINVAL; 1897 1898 if (base_gfn != old->base_gfn) 1899 change = KVM_MR_MOVE; 1900 else if (mem->flags != old->flags) 1901 change = KVM_MR_FLAGS_ONLY; 1902 else /* Nothing to change. */ 1903 return 0; 1904 } 1905 1906 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 1907 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 1908 return -EEXIST; 1909 1910 /* Allocate a slot that will persist in the memslot. 
*/ 1911 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 1912 if (!new) 1913 return -ENOMEM; 1914 1915 new->as_id = as_id; 1916 new->id = id; 1917 new->base_gfn = base_gfn; 1918 new->npages = npages; 1919 new->flags = mem->flags; 1920 new->userspace_addr = mem->userspace_addr; 1921 1922 r = kvm_set_memslot(kvm, old, new, change); 1923 if (r) 1924 kfree(new); 1925 return r; 1926 } 1927 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 1928 1929 int kvm_set_memory_region(struct kvm *kvm, 1930 const struct kvm_userspace_memory_region *mem) 1931 { 1932 int r; 1933 1934 mutex_lock(&kvm->slots_lock); 1935 r = __kvm_set_memory_region(kvm, mem); 1936 mutex_unlock(&kvm->slots_lock); 1937 return r; 1938 } 1939 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1940 1941 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1942 struct kvm_userspace_memory_region *mem) 1943 { 1944 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1945 return -EINVAL; 1946 1947 return kvm_set_memory_region(kvm, mem); 1948 } 1949 1950 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1951 /** 1952 * kvm_get_dirty_log - get a snapshot of dirty pages 1953 * @kvm: pointer to kvm instance 1954 * @log: slot id and address to which we copy the log 1955 * @is_dirty: set to '1' if any dirty pages were found 1956 * @memslot: set to the associated memslot, always valid on success 1957 */ 1958 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 1959 int *is_dirty, struct kvm_memory_slot **memslot) 1960 { 1961 struct kvm_memslots *slots; 1962 int i, as_id, id; 1963 unsigned long n; 1964 unsigned long any = 0; 1965 1966 /* Dirty ring tracking is exclusive to dirty log tracking */ 1967 if (kvm->dirty_ring_size) 1968 return -ENXIO; 1969 1970 *memslot = NULL; 1971 *is_dirty = 0; 1972 1973 as_id = log->slot >> 16; 1974 id = (u16)log->slot; 1975 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1976 return -EINVAL; 1977 1978 slots = __kvm_memslots(kvm, as_id); 1979 *memslot = id_to_memslot(slots, id); 1980 if (!(*memslot) || !(*memslot)->dirty_bitmap) 1981 return -ENOENT; 1982 1983 kvm_arch_sync_dirty_log(kvm, *memslot); 1984 1985 n = kvm_dirty_bitmap_bytes(*memslot); 1986 1987 for (i = 0; !any && i < n/sizeof(long); ++i) 1988 any = (*memslot)->dirty_bitmap[i]; 1989 1990 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 1991 return -EFAULT; 1992 1993 if (any) 1994 *is_dirty = 1; 1995 return 0; 1996 } 1997 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1998 1999 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2000 /** 2001 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 2002 * and reenable dirty page tracking for the corresponding pages. 2003 * @kvm: pointer to kvm instance 2004 * @log: slot id and address to which we copy the log 2005 * 2006 * We need to keep it in mind that VCPU threads can write to the bitmap 2007 * concurrently. So, to avoid losing track of dirty pages we keep the 2008 * following order: 2009 * 2010 * 1. Take a snapshot of the bit and clear it if needed. 2011 * 2. Write protect the corresponding page. 2012 * 3. Copy the snapshot to the userspace. 2013 * 4. Upon return caller flushes TLB's if needed. 2014 * 2015 * Between 2 and 4, the guest may write to the page using the remaining TLB 2016 * entry. This is not a problem because the page is reported dirty using 2017 * the snapshot taken before and step 4 ensures that writes done after 2018 * exiting to userspace will be logged for the next call. 
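 *
 * For example: if page P's bit was set, step 1 snapshots and clears it,
 * step 2 write-protects P and step 3 reports P as dirty to userspace.  A
 * write to P through a stale TLB entry before step 4 does not fault, but P
 * has already been reported dirty for this round; once the TLB is flushed,
 * the next write to P faults and sets the bit again, so it is picked up by
 * the next call.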
2019 * 2020 */ 2021 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) 2022 { 2023 struct kvm_memslots *slots; 2024 struct kvm_memory_slot *memslot; 2025 int i, as_id, id; 2026 unsigned long n; 2027 unsigned long *dirty_bitmap; 2028 unsigned long *dirty_bitmap_buffer; 2029 bool flush; 2030 2031 /* Dirty ring tracking is exclusive to dirty log tracking */ 2032 if (kvm->dirty_ring_size) 2033 return -ENXIO; 2034 2035 as_id = log->slot >> 16; 2036 id = (u16)log->slot; 2037 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2038 return -EINVAL; 2039 2040 slots = __kvm_memslots(kvm, as_id); 2041 memslot = id_to_memslot(slots, id); 2042 if (!memslot || !memslot->dirty_bitmap) 2043 return -ENOENT; 2044 2045 dirty_bitmap = memslot->dirty_bitmap; 2046 2047 kvm_arch_sync_dirty_log(kvm, memslot); 2048 2049 n = kvm_dirty_bitmap_bytes(memslot); 2050 flush = false; 2051 if (kvm->manual_dirty_log_protect) { 2052 /* 2053 * Unlike kvm_get_dirty_log, we always return false in *flush, 2054 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There 2055 * is some code duplication between this function and 2056 * kvm_get_dirty_log, but hopefully all architectures will 2057 * transition to kvm_get_dirty_log_protect so that kvm_get_dirty_log 2058 * can be eliminated. 2059 */ 2060 dirty_bitmap_buffer = dirty_bitmap; 2061 } else { 2062 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2063 memset(dirty_bitmap_buffer, 0, n); 2064 2065 KVM_MMU_LOCK(kvm); 2066 for (i = 0; i < n / sizeof(long); i++) { 2067 unsigned long mask; 2068 gfn_t offset; 2069 2070 if (!dirty_bitmap[i]) 2071 continue; 2072 2073 flush = true; 2074 mask = xchg(&dirty_bitmap[i], 0); 2075 dirty_bitmap_buffer[i] = mask; 2076 2077 offset = i * BITS_PER_LONG; 2078 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2079 offset, mask); 2080 } 2081 KVM_MMU_UNLOCK(kvm); 2082 } 2083 2084 if (flush) 2085 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2086 2087 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 2088 return -EFAULT; 2089 return 0; 2090 } 2091 2092 2093 /** 2094 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 2095 * @kvm: kvm instance 2096 * @log: slot id and address to which we copy the log 2097 * 2098 * Steps 1-4 below provide a general overview of dirty page logging. See 2099 * kvm_get_dirty_log_protect() function description for additional details. 2100 * 2101 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 2102 * always flush the TLB (step 4) even if a previous step failed and the dirty 2103 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging API 2104 * does not preclude a subsequent dirty log read by userspace. Flushing the TLB ensures 2105 * writes will be marked dirty for the next log read. 2106 * 2107 * 1. Take a snapshot of the bit and clear it if needed. 2108 * 2. Write protect the corresponding page. 2109 * 3. Copy the snapshot to the userspace. 2110 * 4. Flush TLB's if needed. 2111 */ 2112 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2113 struct kvm_dirty_log *log) 2114 { 2115 int r; 2116 2117 mutex_lock(&kvm->slots_lock); 2118 2119 r = kvm_get_dirty_log_protect(kvm, log); 2120 2121 mutex_unlock(&kvm->slots_lock); 2122 return r; 2123 } 2124 2125 /** 2126 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 2127 * and reenable dirty page tracking for the corresponding pages.
2128 * @kvm: pointer to kvm instance 2129 * @log: slot id and address from which to fetch the bitmap of dirty pages 2130 */ 2131 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2132 struct kvm_clear_dirty_log *log) 2133 { 2134 struct kvm_memslots *slots; 2135 struct kvm_memory_slot *memslot; 2136 int as_id, id; 2137 gfn_t offset; 2138 unsigned long i, n; 2139 unsigned long *dirty_bitmap; 2140 unsigned long *dirty_bitmap_buffer; 2141 bool flush; 2142 2143 /* Dirty ring tracking is exclusive to dirty log tracking */ 2144 if (kvm->dirty_ring_size) 2145 return -ENXIO; 2146 2147 as_id = log->slot >> 16; 2148 id = (u16)log->slot; 2149 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2150 return -EINVAL; 2151 2152 if (log->first_page & 63) 2153 return -EINVAL; 2154 2155 slots = __kvm_memslots(kvm, as_id); 2156 memslot = id_to_memslot(slots, id); 2157 if (!memslot || !memslot->dirty_bitmap) 2158 return -ENOENT; 2159 2160 dirty_bitmap = memslot->dirty_bitmap; 2161 2162 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2163 2164 if (log->first_page > memslot->npages || 2165 log->num_pages > memslot->npages - log->first_page || 2166 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2167 return -EINVAL; 2168 2169 kvm_arch_sync_dirty_log(kvm, memslot); 2170 2171 flush = false; 2172 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2173 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2174 return -EFAULT; 2175 2176 KVM_MMU_LOCK(kvm); 2177 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2178 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2179 i++, offset += BITS_PER_LONG) { 2180 unsigned long mask = *dirty_bitmap_buffer++; 2181 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2182 if (!mask) 2183 continue; 2184 2185 mask &= atomic_long_fetch_andnot(mask, p); 2186 2187 /* 2188 * mask contains the bits that really have been cleared. This 2189 * never includes any bits beyond the length of the memslot (if 2190 * the length is not aligned to 64 pages), therefore it is not 2191 * a problem if userspace sets them in log->dirty_bitmap. 2192 */ 2193 if (mask) { 2194 flush = true; 2195 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2196 offset, mask); 2197 } 2198 } 2199 KVM_MMU_UNLOCK(kvm); 2200 2201 if (flush) 2202 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 2203 2204 return 0; 2205 } 2206 2207 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2208 struct kvm_clear_dirty_log *log) 2209 { 2210 int r; 2211 2212 mutex_lock(&kvm->slots_lock); 2213 2214 r = kvm_clear_dirty_log_protect(kvm, log); 2215 2216 mutex_unlock(&kvm->slots_lock); 2217 return r; 2218 } 2219 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2220 2221 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2222 { 2223 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2224 } 2225 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2226 2227 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2228 { 2229 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2230 u64 gen = slots->generation; 2231 struct kvm_memory_slot *slot; 2232 2233 /* 2234 * This also protects against using a memslot from a different address space, 2235 * since different address spaces have different generation numbers. 
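 *
 * Dropping the cached slot when the generation changes is cheap: the lookup
 * below falls back to a full search_memslots() walk once and then re-caches
 * the result in vcpu->last_used_slot for subsequent lookups.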
2236 */ 2237 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2238 vcpu->last_used_slot = NULL; 2239 vcpu->last_used_slot_gen = gen; 2240 } 2241 2242 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2243 if (slot) 2244 return slot; 2245 2246 /* 2247 * Fall back to searching all memslots. We purposely use 2248 * search_memslots() instead of __gfn_to_memslot() to avoid 2249 * thrashing the VM-wide last_used_slot in kvm_memslots. 2250 */ 2251 slot = search_memslots(slots, gfn, false); 2252 if (slot) { 2253 vcpu->last_used_slot = slot; 2254 return slot; 2255 } 2256 2257 return NULL; 2258 } 2259 2260 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2261 { 2262 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2263 2264 return kvm_is_visible_memslot(memslot); 2265 } 2266 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2267 2268 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2269 { 2270 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2271 2272 return kvm_is_visible_memslot(memslot); 2273 } 2274 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2275 2276 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2277 { 2278 struct vm_area_struct *vma; 2279 unsigned long addr, size; 2280 2281 size = PAGE_SIZE; 2282 2283 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2284 if (kvm_is_error_hva(addr)) 2285 return PAGE_SIZE; 2286 2287 mmap_read_lock(current->mm); 2288 vma = find_vma(current->mm, addr); 2289 if (!vma) 2290 goto out; 2291 2292 size = vma_kernel_pagesize(vma); 2293 2294 out: 2295 mmap_read_unlock(current->mm); 2296 2297 return size; 2298 } 2299 2300 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2301 { 2302 return slot->flags & KVM_MEM_READONLY; 2303 } 2304 2305 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2306 gfn_t *nr_pages, bool write) 2307 { 2308 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2309 return KVM_HVA_ERR_BAD; 2310 2311 if (memslot_is_readonly(slot) && write) 2312 return KVM_HVA_ERR_RO_BAD; 2313 2314 if (nr_pages) 2315 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2316 2317 return __gfn_to_hva_memslot(slot, gfn); 2318 } 2319 2320 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2321 gfn_t *nr_pages) 2322 { 2323 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2324 } 2325 2326 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2327 gfn_t gfn) 2328 { 2329 return gfn_to_hva_many(slot, gfn, NULL); 2330 } 2331 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2332 2333 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2334 { 2335 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2336 } 2337 EXPORT_SYMBOL_GPL(gfn_to_hva); 2338 2339 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2340 { 2341 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2342 } 2343 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2344 2345 /* 2346 * Return the hva of a @gfn and the R/W attribute if possible. 
2347 * 2348 * @slot: the kvm_memory_slot which contains @gfn 2349 * @gfn: the gfn to be translated 2350 * @writable: used to return the read/write attribute of the @slot if the hva 2351 * is valid and @writable is not NULL 2352 */ 2353 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2354 gfn_t gfn, bool *writable) 2355 { 2356 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2357 2358 if (!kvm_is_error_hva(hva) && writable) 2359 *writable = !memslot_is_readonly(slot); 2360 2361 return hva; 2362 } 2363 2364 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2365 { 2366 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2367 2368 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2369 } 2370 2371 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2372 { 2373 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2374 2375 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2376 } 2377 2378 static inline int check_user_page_hwpoison(unsigned long addr) 2379 { 2380 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2381 2382 rc = get_user_pages(addr, 1, flags, NULL, NULL); 2383 return rc == -EHWPOISON; 2384 } 2385 2386 /* 2387 * The fast path to get the writable pfn which will be stored in @pfn, 2388 * true indicates success, otherwise false is returned. It's also the 2389 * only part that can run in atomic context. 2390 */ 2391 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2392 bool *writable, kvm_pfn_t *pfn) 2393 { 2394 struct page *page[1]; 2395 2396 /* 2397 * Fast pin a writable pfn only if it is a write fault request 2398 * or the caller allows mapping a writable pfn for a read fault 2399 * request. 2400 */ 2401 if (!(write_fault || writable)) 2402 return false; 2403 2404 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2405 *pfn = page_to_pfn(page[0]); 2406 2407 if (writable) 2408 *writable = true; 2409 return true; 2410 } 2411 2412 return false; 2413 } 2414 2415 /* 2416 * The slow path to get the pfn of the specified host virtual address; 2417 * 1 indicates success, -errno is returned if an error is detected.
2418 */ 2419 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2420 bool *writable, kvm_pfn_t *pfn) 2421 { 2422 unsigned int flags = FOLL_HWPOISON; 2423 struct page *page; 2424 int npages = 0; 2425 2426 might_sleep(); 2427 2428 if (writable) 2429 *writable = write_fault; 2430 2431 if (write_fault) 2432 flags |= FOLL_WRITE; 2433 if (async) 2434 flags |= FOLL_NOWAIT; 2435 2436 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2437 if (npages != 1) 2438 return npages; 2439 2440 /* map read fault as writable if possible */ 2441 if (unlikely(!write_fault) && writable) { 2442 struct page *wpage; 2443 2444 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2445 *writable = true; 2446 put_page(page); 2447 page = wpage; 2448 } 2449 } 2450 *pfn = page_to_pfn(page); 2451 return npages; 2452 } 2453 2454 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2455 { 2456 if (unlikely(!(vma->vm_flags & VM_READ))) 2457 return false; 2458 2459 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2460 return false; 2461 2462 return true; 2463 } 2464 2465 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2466 { 2467 if (kvm_is_reserved_pfn(pfn)) 2468 return 1; 2469 return get_page_unless_zero(pfn_to_page(pfn)); 2470 } 2471 2472 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2473 unsigned long addr, bool write_fault, 2474 bool *writable, kvm_pfn_t *p_pfn) 2475 { 2476 kvm_pfn_t pfn; 2477 pte_t *ptep; 2478 spinlock_t *ptl; 2479 int r; 2480 2481 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2482 if (r) { 2483 /* 2484 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2485 * not call the fault handler, so do it here. 2486 */ 2487 bool unlocked = false; 2488 r = fixup_user_fault(current->mm, addr, 2489 (write_fault ? FAULT_FLAG_WRITE : 0), 2490 &unlocked); 2491 if (unlocked) 2492 return -EAGAIN; 2493 if (r) 2494 return r; 2495 2496 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2497 if (r) 2498 return r; 2499 } 2500 2501 if (write_fault && !pte_write(*ptep)) { 2502 pfn = KVM_PFN_ERR_RO_FAULT; 2503 goto out; 2504 } 2505 2506 if (writable) 2507 *writable = pte_write(*ptep); 2508 pfn = pte_pfn(*ptep); 2509 2510 /* 2511 * Get a reference here because callers of *hva_to_pfn* and 2512 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2513 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2514 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2515 * simply do nothing for reserved pfns. 2516 * 2517 * Whoever called remap_pfn_range is also going to call e.g. 2518 * unmap_mapping_range before the underlying pages are freed, 2519 * causing a call to our MMU notifier. 2520 * 2521 * Certain IO or PFNMAP mappings can be backed with valid 2522 * struct pages, but be allocated without refcounting e.g., 2523 * tail pages of non-compound higher order allocations, which 2524 * would then underflow the refcount when the caller does the 2525 * required put_page. Don't allow those pages here. 2526 */ 2527 if (!kvm_try_get_pfn(pfn)) 2528 r = -EFAULT; 2529 2530 out: 2531 pte_unmap_unlock(ptep, ptl); 2532 *p_pfn = pfn; 2533 2534 return r; 2535 } 2536 2537 /* 2538 * Pin guest page in memory and return its pfn. 
2539 * @addr: host virtual address which maps memory to the guest 2540 * @atomic: whether the call is made from atomic context, i.e. this function must not sleep 2541 * @async: whether this function needs to wait for I/O to complete if the 2542 * host page is not in memory 2543 * @write_fault: whether we should get a writable host page 2544 * @writable: whether to allow mapping a writable host page for !@write_fault 2545 * 2546 * The function will map a writable host page for these two cases: 2547 * 1): @write_fault = true 2548 * 2): @write_fault = false && @writable; @writable will tell the caller 2549 * whether the mapping is writable. 2550 */ 2551 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, 2552 bool write_fault, bool *writable) 2553 { 2554 struct vm_area_struct *vma; 2555 kvm_pfn_t pfn = 0; 2556 int npages, r; 2557 2558 /* we can do it either atomically or asynchronously, not both */ 2559 BUG_ON(atomic && async); 2560 2561 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) 2562 return pfn; 2563 2564 if (atomic) 2565 return KVM_PFN_ERR_FAULT; 2566 2567 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); 2568 if (npages == 1) 2569 return pfn; 2570 2571 mmap_read_lock(current->mm); 2572 if (npages == -EHWPOISON || 2573 (!async && check_user_page_hwpoison(addr))) { 2574 pfn = KVM_PFN_ERR_HWPOISON; 2575 goto exit; 2576 } 2577 2578 retry: 2579 vma = vma_lookup(current->mm, addr); 2580 2581 if (vma == NULL) 2582 pfn = KVM_PFN_ERR_FAULT; 2583 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 2584 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); 2585 if (r == -EAGAIN) 2586 goto retry; 2587 if (r < 0) 2588 pfn = KVM_PFN_ERR_FAULT; 2589 } else { 2590 if (async && vma_is_valid(vma, write_fault)) 2591 *async = true; 2592 pfn = KVM_PFN_ERR_FAULT; 2593 } 2594 exit: 2595 mmap_read_unlock(current->mm); 2596 return pfn; 2597 } 2598 2599 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, 2600 bool atomic, bool *async, bool write_fault, 2601 bool *writable, hva_t *hva) 2602 { 2603 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); 2604 2605 if (hva) 2606 *hva = addr; 2607 2608 if (addr == KVM_HVA_ERR_RO_BAD) { 2609 if (writable) 2610 *writable = false; 2611 return KVM_PFN_ERR_RO_FAULT; 2612 } 2613 2614 if (kvm_is_error_hva(addr)) { 2615 if (writable) 2616 *writable = false; 2617 return KVM_PFN_NOSLOT; 2618 } 2619 2620 /* Do not map writable pfn in the readonly memslot.
*/ 2621 if (writable && memslot_is_readonly(slot)) { 2622 *writable = false; 2623 writable = NULL; 2624 } 2625 2626 return hva_to_pfn(addr, atomic, async, write_fault, 2627 writable); 2628 } 2629 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2630 2631 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2632 bool *writable) 2633 { 2634 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2635 write_fault, writable, NULL); 2636 } 2637 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2638 2639 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) 2640 { 2641 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); 2642 } 2643 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2644 2645 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) 2646 { 2647 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); 2648 } 2649 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2650 2651 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2652 { 2653 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2654 } 2655 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2656 2657 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2658 { 2659 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2660 } 2661 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2662 2663 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2664 { 2665 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2666 } 2667 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2668 2669 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2670 struct page **pages, int nr_pages) 2671 { 2672 unsigned long addr; 2673 gfn_t entry = 0; 2674 2675 addr = gfn_to_hva_many(slot, gfn, &entry); 2676 if (kvm_is_error_hva(addr)) 2677 return -1; 2678 2679 if (entry < nr_pages) 2680 return 0; 2681 2682 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2683 } 2684 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2685 2686 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 2687 { 2688 if (is_error_noslot_pfn(pfn)) 2689 return KVM_ERR_PTR_BAD_PAGE; 2690 2691 if (kvm_is_reserved_pfn(pfn)) { 2692 WARN_ON(1); 2693 return KVM_ERR_PTR_BAD_PAGE; 2694 } 2695 2696 return pfn_to_page(pfn); 2697 } 2698 2699 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2700 { 2701 kvm_pfn_t pfn; 2702 2703 pfn = gfn_to_pfn(kvm, gfn); 2704 2705 return kvm_pfn_to_page(pfn); 2706 } 2707 EXPORT_SYMBOL_GPL(gfn_to_page); 2708 2709 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) 2710 { 2711 if (pfn == 0) 2712 return; 2713 2714 if (dirty) 2715 kvm_release_pfn_dirty(pfn); 2716 else 2717 kvm_release_pfn_clean(pfn); 2718 } 2719 2720 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2721 { 2722 kvm_pfn_t pfn; 2723 void *hva = NULL; 2724 struct page *page = KVM_UNMAPPED_PAGE; 2725 2726 if (!map) 2727 return -EINVAL; 2728 2729 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2730 if (is_error_noslot_pfn(pfn)) 2731 return -EINVAL; 2732 2733 if (pfn_valid(pfn)) { 2734 page = pfn_to_page(pfn); 2735 hva = kmap(page); 2736 #ifdef CONFIG_HAS_IOMEM 2737 } else { 2738 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2739 #endif 2740 } 2741 2742 if (!hva) 2743 return -EFAULT; 2744 2745 map->page = page; 2746 map->hva = hva; 2747 map->pfn = pfn; 2748 map->gfn = gfn; 2749 2750 return 0; 2751 } 2752 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2753 2754 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2755 { 2756 if (!map) 
2757 return; 2758 2759 if (!map->hva) 2760 return; 2761 2762 if (map->page != KVM_UNMAPPED_PAGE) 2763 kunmap(map->page); 2764 #ifdef CONFIG_HAS_IOMEM 2765 else 2766 memunmap(map->hva); 2767 #endif 2768 2769 if (dirty) 2770 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 2771 2772 kvm_release_pfn(map->pfn, dirty); 2773 2774 map->hva = NULL; 2775 map->page = NULL; 2776 } 2777 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2778 2779 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 2780 { 2781 kvm_pfn_t pfn; 2782 2783 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 2784 2785 return kvm_pfn_to_page(pfn); 2786 } 2787 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 2788 2789 void kvm_release_page_clean(struct page *page) 2790 { 2791 WARN_ON(is_error_page(page)); 2792 2793 kvm_release_pfn_clean(page_to_pfn(page)); 2794 } 2795 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2796 2797 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2798 { 2799 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 2800 put_page(pfn_to_page(pfn)); 2801 } 2802 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2803 2804 void kvm_release_page_dirty(struct page *page) 2805 { 2806 WARN_ON(is_error_page(page)); 2807 2808 kvm_release_pfn_dirty(page_to_pfn(page)); 2809 } 2810 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2811 2812 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2813 { 2814 kvm_set_pfn_dirty(pfn); 2815 kvm_release_pfn_clean(pfn); 2816 } 2817 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2818 2819 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2820 { 2821 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2822 SetPageDirty(pfn_to_page(pfn)); 2823 } 2824 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2825 2826 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2827 { 2828 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2829 mark_page_accessed(pfn_to_page(pfn)); 2830 } 2831 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2832 2833 static int next_segment(unsigned long len, int offset) 2834 { 2835 if (len > PAGE_SIZE - offset) 2836 return PAGE_SIZE - offset; 2837 else 2838 return len; 2839 } 2840 2841 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2842 void *data, int offset, int len) 2843 { 2844 int r; 2845 unsigned long addr; 2846 2847 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2848 if (kvm_is_error_hva(addr)) 2849 return -EFAULT; 2850 r = __copy_from_user(data, (void __user *)addr + offset, len); 2851 if (r) 2852 return -EFAULT; 2853 return 0; 2854 } 2855 2856 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2857 int len) 2858 { 2859 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2860 2861 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2862 } 2863 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 2864 2865 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 2866 int offset, int len) 2867 { 2868 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2869 2870 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2871 } 2872 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 2873 2874 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 2875 { 2876 gfn_t gfn = gpa >> PAGE_SHIFT; 2877 int seg; 2878 int offset = offset_in_page(gpa); 2879 int ret; 2880 2881 while ((seg = next_segment(len, offset)) != 0) { 2882 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 2883 if (ret < 0) 2884 return ret; 2885 offset = 0; 2886 len -= seg; 2887 data += seg; 2888 ++gfn; 2889 } 2890 return 0; 2891 } 2892 EXPORT_SYMBOL_GPL(kvm_read_guest); 2893 
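/*
 * kvm_read_guest(), kvm_write_guest() and the vCPU variants below split an
 * access that crosses page boundaries into per-page segments via
 * next_segment().  For example, with 4KiB pages, reading 0x1800 bytes at
 * gpa 0x10f00 is performed as 0x100 bytes from gfn 0x10, 0x1000 bytes from
 * gfn 0x11 and 0x700 bytes from gfn 0x12.
 */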
2894 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 2895 { 2896 gfn_t gfn = gpa >> PAGE_SHIFT; 2897 int seg; 2898 int offset = offset_in_page(gpa); 2899 int ret; 2900 2901 while ((seg = next_segment(len, offset)) != 0) { 2902 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 2903 if (ret < 0) 2904 return ret; 2905 offset = 0; 2906 len -= seg; 2907 data += seg; 2908 ++gfn; 2909 } 2910 return 0; 2911 } 2912 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 2913 2914 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2915 void *data, int offset, unsigned long len) 2916 { 2917 int r; 2918 unsigned long addr; 2919 2920 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2921 if (kvm_is_error_hva(addr)) 2922 return -EFAULT; 2923 pagefault_disable(); 2924 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 2925 pagefault_enable(); 2926 if (r) 2927 return -EFAULT; 2928 return 0; 2929 } 2930 2931 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 2932 void *data, unsigned long len) 2933 { 2934 gfn_t gfn = gpa >> PAGE_SHIFT; 2935 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2936 int offset = offset_in_page(gpa); 2937 2938 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 2939 } 2940 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 2941 2942 static int __kvm_write_guest_page(struct kvm *kvm, 2943 struct kvm_memory_slot *memslot, gfn_t gfn, 2944 const void *data, int offset, int len) 2945 { 2946 int r; 2947 unsigned long addr; 2948 2949 addr = gfn_to_hva_memslot(memslot, gfn); 2950 if (kvm_is_error_hva(addr)) 2951 return -EFAULT; 2952 r = __copy_to_user((void __user *)addr + offset, data, len); 2953 if (r) 2954 return -EFAULT; 2955 mark_page_dirty_in_slot(kvm, memslot, gfn); 2956 return 0; 2957 } 2958 2959 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 2960 const void *data, int offset, int len) 2961 { 2962 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2963 2964 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 2965 } 2966 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 2967 2968 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 2969 const void *data, int offset, int len) 2970 { 2971 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2972 2973 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 2974 } 2975 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 2976 2977 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 2978 unsigned long len) 2979 { 2980 gfn_t gfn = gpa >> PAGE_SHIFT; 2981 int seg; 2982 int offset = offset_in_page(gpa); 2983 int ret; 2984 2985 while ((seg = next_segment(len, offset)) != 0) { 2986 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 2987 if (ret < 0) 2988 return ret; 2989 offset = 0; 2990 len -= seg; 2991 data += seg; 2992 ++gfn; 2993 } 2994 return 0; 2995 } 2996 EXPORT_SYMBOL_GPL(kvm_write_guest); 2997 2998 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 2999 unsigned long len) 3000 { 3001 gfn_t gfn = gpa >> PAGE_SHIFT; 3002 int seg; 3003 int offset = offset_in_page(gpa); 3004 int ret; 3005 3006 while ((seg = next_segment(len, offset)) != 0) { 3007 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 3008 if (ret < 0) 3009 return ret; 3010 offset = 0; 3011 len -= seg; 3012 data += seg; 3013 ++gfn; 3014 } 3015 return 0; 3016 } 3017 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3018 3019 static int 
__kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3020 struct gfn_to_hva_cache *ghc, 3021 gpa_t gpa, unsigned long len) 3022 { 3023 int offset = offset_in_page(gpa); 3024 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3025 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3026 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3027 gfn_t nr_pages_avail; 3028 3029 /* Update ghc->generation before performing any error checks. */ 3030 ghc->generation = slots->generation; 3031 3032 if (start_gfn > end_gfn) { 3033 ghc->hva = KVM_HVA_ERR_BAD; 3034 return -EINVAL; 3035 } 3036 3037 /* 3038 * If the requested region crosses two memslots, we still 3039 * verify that the entire region is valid here. 3040 */ 3041 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3042 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3043 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3044 &nr_pages_avail); 3045 if (kvm_is_error_hva(ghc->hva)) 3046 return -EFAULT; 3047 } 3048 3049 /* Use the slow path for cross page reads and writes. */ 3050 if (nr_pages_needed == 1) 3051 ghc->hva += offset; 3052 else 3053 ghc->memslot = NULL; 3054 3055 ghc->gpa = gpa; 3056 ghc->len = len; 3057 return 0; 3058 } 3059 3060 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3061 gpa_t gpa, unsigned long len) 3062 { 3063 struct kvm_memslots *slots = kvm_memslots(kvm); 3064 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 3065 } 3066 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 3067 3068 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3069 void *data, unsigned int offset, 3070 unsigned long len) 3071 { 3072 struct kvm_memslots *slots = kvm_memslots(kvm); 3073 int r; 3074 gpa_t gpa = ghc->gpa + offset; 3075 3076 if (WARN_ON_ONCE(len + offset > ghc->len)) 3077 return -EINVAL; 3078 3079 if (slots->generation != ghc->generation) { 3080 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3081 return -EFAULT; 3082 } 3083 3084 if (kvm_is_error_hva(ghc->hva)) 3085 return -EFAULT; 3086 3087 if (unlikely(!ghc->memslot)) 3088 return kvm_write_guest(kvm, gpa, data, len); 3089 3090 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3091 if (r) 3092 return -EFAULT; 3093 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3094 3095 return 0; 3096 } 3097 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3098 3099 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3100 void *data, unsigned long len) 3101 { 3102 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3103 } 3104 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3105 3106 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3107 void *data, unsigned int offset, 3108 unsigned long len) 3109 { 3110 struct kvm_memslots *slots = kvm_memslots(kvm); 3111 int r; 3112 gpa_t gpa = ghc->gpa + offset; 3113 3114 if (WARN_ON_ONCE(len + offset > ghc->len)) 3115 return -EINVAL; 3116 3117 if (slots->generation != ghc->generation) { 3118 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3119 return -EFAULT; 3120 } 3121 3122 if (kvm_is_error_hva(ghc->hva)) 3123 return -EFAULT; 3124 3125 if (unlikely(!ghc->memslot)) 3126 return kvm_read_guest(kvm, gpa, data, len); 3127 3128 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3129 if (r) 3130 return -EFAULT; 3131 3132 return 0; 3133 } 3134 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3135 3136 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 
3137 void *data, unsigned long len) 3138 { 3139 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3140 } 3141 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3142 3143 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3144 { 3145 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3146 gfn_t gfn = gpa >> PAGE_SHIFT; 3147 int seg; 3148 int offset = offset_in_page(gpa); 3149 int ret; 3150 3151 while ((seg = next_segment(len, offset)) != 0) { 3152 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); 3153 if (ret < 0) 3154 return ret; 3155 offset = 0; 3156 len -= seg; 3157 ++gfn; 3158 } 3159 return 0; 3160 } 3161 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3162 3163 void mark_page_dirty_in_slot(struct kvm *kvm, 3164 const struct kvm_memory_slot *memslot, 3165 gfn_t gfn) 3166 { 3167 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 3168 3169 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3170 if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm)) 3171 return; 3172 #endif 3173 3174 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3175 unsigned long rel_gfn = gfn - memslot->base_gfn; 3176 u32 slot = (memslot->as_id << 16) | memslot->id; 3177 3178 if (kvm->dirty_ring_size) 3179 kvm_dirty_ring_push(&vcpu->dirty_ring, 3180 slot, rel_gfn); 3181 else 3182 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3183 } 3184 } 3185 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3186 3187 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3188 { 3189 struct kvm_memory_slot *memslot; 3190 3191 memslot = gfn_to_memslot(kvm, gfn); 3192 mark_page_dirty_in_slot(kvm, memslot, gfn); 3193 } 3194 EXPORT_SYMBOL_GPL(mark_page_dirty); 3195 3196 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3197 { 3198 struct kvm_memory_slot *memslot; 3199 3200 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3201 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3202 } 3203 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3204 3205 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3206 { 3207 if (!vcpu->sigset_active) 3208 return; 3209 3210 /* 3211 * This does a lockless modification of ->real_blocked, which is fine 3212 * because only current can change ->real_blocked and all readers of 3213 * ->real_blocked don't care as long as ->real_blocked is always a subset 3214 * of ->blocked.
3215 */ 3216 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked); 3217 } 3218 3219 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) 3220 { 3221 if (!vcpu->sigset_active) 3222 return; 3223 3224 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL); 3225 sigemptyset(&current->real_blocked); 3226 } 3227 3228 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 3229 { 3230 unsigned int old, val, grow, grow_start; 3231 3232 old = val = vcpu->halt_poll_ns; 3233 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3234 grow = READ_ONCE(halt_poll_ns_grow); 3235 if (!grow) 3236 goto out; 3237 3238 val *= grow; 3239 if (val < grow_start) 3240 val = grow_start; 3241 3242 if (val > vcpu->kvm->max_halt_poll_ns) 3243 val = vcpu->kvm->max_halt_poll_ns; 3244 3245 vcpu->halt_poll_ns = val; 3246 out: 3247 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 3248 } 3249 3250 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 3251 { 3252 unsigned int old, val, shrink, grow_start; 3253 3254 old = val = vcpu->halt_poll_ns; 3255 shrink = READ_ONCE(halt_poll_ns_shrink); 3256 grow_start = READ_ONCE(halt_poll_ns_grow_start); 3257 if (shrink == 0) 3258 val = 0; 3259 else 3260 val /= shrink; 3261 3262 if (val < grow_start) 3263 val = 0; 3264 3265 vcpu->halt_poll_ns = val; 3266 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 3267 } 3268 3269 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 3270 { 3271 int ret = -EINTR; 3272 int idx = srcu_read_lock(&vcpu->kvm->srcu); 3273 3274 if (kvm_arch_vcpu_runnable(vcpu)) { 3275 kvm_make_request(KVM_REQ_UNHALT, vcpu); 3276 goto out; 3277 } 3278 if (kvm_cpu_has_pending_timer(vcpu)) 3279 goto out; 3280 if (signal_pending(current)) 3281 goto out; 3282 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) 3283 goto out; 3284 3285 ret = 0; 3286 out: 3287 srcu_read_unlock(&vcpu->kvm->srcu, idx); 3288 return ret; 3289 } 3290 3291 /* 3292 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is 3293 * pending. This is mostly used when halting a vCPU, but may also be used 3294 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI. 3295 */ 3296 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 3297 { 3298 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 3299 bool waited = false; 3300 3301 vcpu->stat.generic.blocking = 1; 3302 3303 kvm_arch_vcpu_blocking(vcpu); 3304 3305 prepare_to_rcuwait(wait); 3306 for (;;) { 3307 set_current_state(TASK_INTERRUPTIBLE); 3308 3309 if (kvm_vcpu_check_block(vcpu) < 0) 3310 break; 3311 3312 waited = true; 3313 schedule(); 3314 } 3315 finish_rcuwait(wait); 3316 3317 kvm_arch_vcpu_unblocking(vcpu); 3318 3319 vcpu->stat.generic.blocking = 0; 3320 3321 return waited; 3322 } 3323 3324 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, 3325 ktime_t end, bool success) 3326 { 3327 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; 3328 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); 3329 3330 ++vcpu->stat.generic.halt_attempted_poll; 3331 3332 if (success) { 3333 ++vcpu->stat.generic.halt_successful_poll; 3334 3335 if (!vcpu_valid_wakeup(vcpu)) 3336 ++vcpu->stat.generic.halt_poll_invalid; 3337 3338 stats->halt_poll_success_ns += poll_ns; 3339 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); 3340 } else { 3341 stats->halt_poll_fail_ns += poll_ns; 3342 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); 3343 } 3344 } 3345 3346 /* 3347 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...
If halt 3348 * polling is enabled, busy wait for a short time before blocking to avoid the 3349 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3350 * is halted. 3351 */ 3352 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3353 { 3354 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3355 bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3356 ktime_t start, cur, poll_end; 3357 bool waited = false; 3358 u64 halt_ns; 3359 3360 start = cur = poll_end = ktime_get(); 3361 if (do_halt_poll) { 3362 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3363 3364 do { 3365 /* 3366 * This sets KVM_REQ_UNHALT if an interrupt 3367 * arrives. 3368 */ 3369 if (kvm_vcpu_check_block(vcpu) < 0) 3370 goto out; 3371 cpu_relax(); 3372 poll_end = cur = ktime_get(); 3373 } while (kvm_vcpu_can_poll(cur, stop)); 3374 } 3375 3376 waited = kvm_vcpu_block(vcpu); 3377 3378 cur = ktime_get(); 3379 if (waited) { 3380 vcpu->stat.generic.halt_wait_ns += 3381 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3382 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3383 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3384 } 3385 out: 3386 /* The total time the vCPU was "halted", including polling time. */ 3387 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3388 3389 /* 3390 * Note, halt-polling is considered successful so long as the vCPU was 3391 * never actually scheduled out, i.e. even if the wake event arrived 3392 * after the halt-polling loop itself, but before the full wait. 3393 */ 3394 if (do_halt_poll) 3395 update_halt_poll_stats(vcpu, start, poll_end, !waited); 3396 3397 if (halt_poll_allowed) { 3398 if (!vcpu_valid_wakeup(vcpu)) { 3399 shrink_halt_poll_ns(vcpu); 3400 } else if (vcpu->kvm->max_halt_poll_ns) { 3401 if (halt_ns <= vcpu->halt_poll_ns) 3402 ; 3403 /* we had a long block, shrink polling */ 3404 else if (vcpu->halt_poll_ns && 3405 halt_ns > vcpu->kvm->max_halt_poll_ns) 3406 shrink_halt_poll_ns(vcpu); 3407 /* we had a short halt and our poll time is too small */ 3408 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 3409 halt_ns < vcpu->kvm->max_halt_poll_ns) 3410 grow_halt_poll_ns(vcpu); 3411 } else { 3412 vcpu->halt_poll_ns = 0; 3413 } 3414 } 3415 3416 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); 3417 } 3418 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 3419 3420 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3421 { 3422 if (__kvm_vcpu_wake_up(vcpu)) { 3423 WRITE_ONCE(vcpu->ready, true); 3424 ++vcpu->stat.generic.halt_wakeup; 3425 return true; 3426 } 3427 3428 return false; 3429 } 3430 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3431 3432 #ifndef CONFIG_S390 3433 /* 3434 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3435 */ 3436 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3437 { 3438 int me, cpu; 3439 3440 if (kvm_vcpu_wake_up(vcpu)) 3441 return; 3442 3443 me = get_cpu(); 3444 /* 3445 * The only state change done outside the vcpu mutex is IN_GUEST_MODE 3446 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should 3447 * kick" check does not need atomic operations if kvm_vcpu_kick is used 3448 * within the vCPU thread itself. 3449 */ 3450 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { 3451 if (vcpu->mode == IN_GUEST_MODE) 3452 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); 3453 goto out; 3454 } 3455 3456 /* 3457 * Note, the vCPU could get migrated to a different pCPU at any point 3458 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3459 * IPI to the previous pCPU.
But that's ok because the purpose of the 3460 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3461 * vCPU also requires it to leave IN_GUEST_MODE. 3462 */ 3463 if (kvm_arch_vcpu_should_kick(vcpu)) { 3464 cpu = READ_ONCE(vcpu->cpu); 3465 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3466 smp_send_reschedule(cpu); 3467 } 3468 out: 3469 put_cpu(); 3470 } 3471 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3472 #endif /* !CONFIG_S390 */ 3473 3474 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3475 { 3476 struct pid *pid; 3477 struct task_struct *task = NULL; 3478 int ret = 0; 3479 3480 rcu_read_lock(); 3481 pid = rcu_dereference(target->pid); 3482 if (pid) 3483 task = get_pid_task(pid, PIDTYPE_PID); 3484 rcu_read_unlock(); 3485 if (!task) 3486 return ret; 3487 ret = yield_to(task, 1); 3488 put_task_struct(task); 3489 3490 return ret; 3491 } 3492 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3493 3494 /* 3495 * Helper that checks whether a VCPU is eligible for directed yield. 3496 * The most eligible candidate to yield to is decided by the following heuristics: 3497 * 3498 * (a) A VCPU which has not done a pl-exit or had cpu relax intercepted recently 3499 * (a preempted lock holder), indicated by @in_spin_loop, which is 3500 * set at the beginning and cleared at the end of the interception/PLE handler. 3501 * 3502 * (b) A VCPU which has done a pl-exit/had cpu relax intercepted but did not get 3503 * a chance last time (it has most likely become eligible now since we probably 3504 * yielded to the lock holder in the last iteration; this is tracked by toggling 3505 * @dy_eligible each time a VCPU is checked for eligibility.) 3506 * 3507 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3508 * to a preempted lock holder could result in the wrong VCPU being selected and CPU 3509 * time being burned. Giving priority to a potential lock holder increases lock 3510 * progress. 3511 * 3512 * Since the algorithm is based on heuristics, accessing another VCPU's data without 3513 * locking does no harm. It may result in trying to yield to the same VCPU, failing, 3514 * and continuing with the next VCPU, and so on. 3515 */ 3516 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3517 { 3518 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3519 bool eligible; 3520 3521 eligible = !vcpu->spin_loop.in_spin_loop || 3522 vcpu->spin_loop.dy_eligible; 3523 3524 if (vcpu->spin_loop.in_spin_loop) 3525 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3526 3527 return eligible; 3528 #else 3529 return true; 3530 #endif 3531 } 3532 3533 /* 3534 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3535 * a vcpu_load/vcpu_put pair. However, for most architectures 3536 * kvm_arch_vcpu_runnable does not require vcpu_load.
3537 */ 3538 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3539 { 3540 return kvm_arch_vcpu_runnable(vcpu); 3541 } 3542 3543 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3544 { 3545 if (kvm_arch_dy_runnable(vcpu)) 3546 return true; 3547 3548 #ifdef CONFIG_KVM_ASYNC_PF 3549 if (!list_empty_careful(&vcpu->async_pf.done)) 3550 return true; 3551 #endif 3552 3553 return false; 3554 } 3555 3556 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3557 { 3558 return false; 3559 } 3560 3561 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3562 { 3563 struct kvm *kvm = me->kvm; 3564 struct kvm_vcpu *vcpu; 3565 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3566 unsigned long i; 3567 int yielded = 0; 3568 int try = 3; 3569 int pass; 3570 3571 kvm_vcpu_set_in_spin_loop(me, true); 3572 /* 3573 * We boost the priority of a VCPU that is runnable but not 3574 * currently running, because it got preempted by something 3575 * else and called schedule in __vcpu_run. Hopefully that 3576 * VCPU is holding the lock that we need and will release it. 3577 * We approximate round-robin by starting at the last boosted VCPU. 3578 */ 3579 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3580 kvm_for_each_vcpu(i, vcpu, kvm) { 3581 if (!pass && i <= last_boosted_vcpu) { 3582 i = last_boosted_vcpu; 3583 continue; 3584 } else if (pass && i > last_boosted_vcpu) 3585 break; 3586 if (!READ_ONCE(vcpu->ready)) 3587 continue; 3588 if (vcpu == me) 3589 continue; 3590 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3591 continue; 3592 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3593 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3594 !kvm_arch_vcpu_in_kernel(vcpu)) 3595 continue; 3596 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3597 continue; 3598 3599 yielded = kvm_vcpu_yield_to(vcpu); 3600 if (yielded > 0) { 3601 kvm->last_boosted_vcpu = i; 3602 break; 3603 } else if (yielded < 0) { 3604 try--; 3605 if (!try) 3606 break; 3607 } 3608 } 3609 } 3610 kvm_vcpu_set_in_spin_loop(me, false); 3611 3612 /* Ensure vcpu is not eligible during next spinloop */ 3613 kvm_vcpu_set_dy_eligible(me, false); 3614 } 3615 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3616 3617 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3618 { 3619 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3620 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3621 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3622 kvm->dirty_ring_size / PAGE_SIZE); 3623 #else 3624 return false; 3625 #endif 3626 } 3627 3628 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3629 { 3630 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3631 struct page *page; 3632 3633 if (vmf->pgoff == 0) 3634 page = virt_to_page(vcpu->run); 3635 #ifdef CONFIG_X86 3636 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3637 page = virt_to_page(vcpu->arch.pio_data); 3638 #endif 3639 #ifdef CONFIG_KVM_MMIO 3640 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3641 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3642 #endif 3643 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3644 page = kvm_dirty_ring_get_page( 3645 &vcpu->dirty_ring, 3646 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3647 else 3648 return kvm_arch_vcpu_fault(vcpu, vmf); 3649 get_page(page); 3650 vmf->page = page; 3651 return 0; 3652 } 3653 3654 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3655 .fault = kvm_vcpu_fault, 3656 }; 3657 3658 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3659 { 3660 struct 
kvm_vcpu *vcpu = file->private_data; 3661 unsigned long pages = vma_pages(vma); 3662 3663 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3664 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3665 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3666 return -EINVAL; 3667 3668 vma->vm_ops = &kvm_vcpu_vm_ops; 3669 return 0; 3670 } 3671 3672 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3673 { 3674 struct kvm_vcpu *vcpu = filp->private_data; 3675 3676 kvm_put_kvm(vcpu->kvm); 3677 return 0; 3678 } 3679 3680 static const struct file_operations kvm_vcpu_fops = { 3681 .release = kvm_vcpu_release, 3682 .unlocked_ioctl = kvm_vcpu_ioctl, 3683 .mmap = kvm_vcpu_mmap, 3684 .llseek = noop_llseek, 3685 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3686 }; 3687 3688 /* 3689 * Allocates an inode for the vcpu. 3690 */ 3691 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3692 { 3693 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3694 3695 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3696 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3697 } 3698 3699 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3700 { 3701 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3702 struct dentry *debugfs_dentry; 3703 char dir_name[ITOA_MAX_LEN * 2]; 3704 3705 if (!debugfs_initialized()) 3706 return; 3707 3708 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3709 debugfs_dentry = debugfs_create_dir(dir_name, 3710 vcpu->kvm->debugfs_dentry); 3711 3712 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3713 #endif 3714 } 3715 3716 /* 3717 * Creates some virtual cpus. Good luck creating more than one. 3718 */ 3719 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3720 { 3721 int r; 3722 struct kvm_vcpu *vcpu; 3723 struct page *page; 3724 3725 if (id >= KVM_MAX_VCPU_IDS) 3726 return -EINVAL; 3727 3728 mutex_lock(&kvm->lock); 3729 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 3730 mutex_unlock(&kvm->lock); 3731 return -EINVAL; 3732 } 3733 3734 kvm->created_vcpus++; 3735 mutex_unlock(&kvm->lock); 3736 3737 r = kvm_arch_vcpu_precreate(kvm, id); 3738 if (r) 3739 goto vcpu_decrement; 3740 3741 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3742 if (!vcpu) { 3743 r = -ENOMEM; 3744 goto vcpu_decrement; 3745 } 3746 3747 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3748 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3749 if (!page) { 3750 r = -ENOMEM; 3751 goto vcpu_free; 3752 } 3753 vcpu->run = page_address(page); 3754 3755 kvm_vcpu_init(vcpu, kvm, id); 3756 3757 r = kvm_arch_vcpu_create(vcpu); 3758 if (r) 3759 goto vcpu_free_run_page; 3760 3761 if (kvm->dirty_ring_size) { 3762 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3763 id, kvm->dirty_ring_size); 3764 if (r) 3765 goto arch_vcpu_destroy; 3766 } 3767 3768 mutex_lock(&kvm->lock); 3769 if (kvm_get_vcpu_by_id(kvm, id)) { 3770 r = -EEXIST; 3771 goto unlock_vcpu_destroy; 3772 } 3773 3774 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3775 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); 3776 BUG_ON(r == -EBUSY); 3777 if (r) 3778 goto unlock_vcpu_destroy; 3779 3780 /* Fill the stats id string for the vcpu */ 3781 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 3782 task_pid_nr(current), id); 3783 3784 /* Now it's all set up, let userspace reach it */ 3785 kvm_get_kvm(kvm); 3786 r = create_vcpu_fd(vcpu); 3787 if (r < 0) { 3788 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); 3789 kvm_put_kvm_no_destroy(kvm); 3790 goto 
unlock_vcpu_destroy; 3791 } 3792 3793 /* 3794 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 3795 * pointer before kvm->online_vcpu's incremented value. 3796 */ 3797 smp_wmb(); 3798 atomic_inc(&kvm->online_vcpus); 3799 3800 mutex_unlock(&kvm->lock); 3801 kvm_arch_vcpu_postcreate(vcpu); 3802 kvm_create_vcpu_debugfs(vcpu); 3803 return r; 3804 3805 unlock_vcpu_destroy: 3806 mutex_unlock(&kvm->lock); 3807 kvm_dirty_ring_free(&vcpu->dirty_ring); 3808 arch_vcpu_destroy: 3809 kvm_arch_vcpu_destroy(vcpu); 3810 vcpu_free_run_page: 3811 free_page((unsigned long)vcpu->run); 3812 vcpu_free: 3813 kmem_cache_free(kvm_vcpu_cache, vcpu); 3814 vcpu_decrement: 3815 mutex_lock(&kvm->lock); 3816 kvm->created_vcpus--; 3817 mutex_unlock(&kvm->lock); 3818 return r; 3819 } 3820 3821 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3822 { 3823 if (sigset) { 3824 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3825 vcpu->sigset_active = 1; 3826 vcpu->sigset = *sigset; 3827 } else 3828 vcpu->sigset_active = 0; 3829 return 0; 3830 } 3831 3832 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 3833 size_t size, loff_t *offset) 3834 { 3835 struct kvm_vcpu *vcpu = file->private_data; 3836 3837 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 3838 &kvm_vcpu_stats_desc[0], &vcpu->stat, 3839 sizeof(vcpu->stat), user_buffer, size, offset); 3840 } 3841 3842 static const struct file_operations kvm_vcpu_stats_fops = { 3843 .read = kvm_vcpu_stats_read, 3844 .llseek = noop_llseek, 3845 }; 3846 3847 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 3848 { 3849 int fd; 3850 struct file *file; 3851 char name[15 + ITOA_MAX_LEN + 1]; 3852 3853 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 3854 3855 fd = get_unused_fd_flags(O_CLOEXEC); 3856 if (fd < 0) 3857 return fd; 3858 3859 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 3860 if (IS_ERR(file)) { 3861 put_unused_fd(fd); 3862 return PTR_ERR(file); 3863 } 3864 file->f_mode |= FMODE_PREAD; 3865 fd_install(fd, file); 3866 3867 return fd; 3868 } 3869 3870 static long kvm_vcpu_ioctl(struct file *filp, 3871 unsigned int ioctl, unsigned long arg) 3872 { 3873 struct kvm_vcpu *vcpu = filp->private_data; 3874 void __user *argp = (void __user *)arg; 3875 int r; 3876 struct kvm_fpu *fpu = NULL; 3877 struct kvm_sregs *kvm_sregs = NULL; 3878 3879 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 3880 return -EIO; 3881 3882 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 3883 return -EINVAL; 3884 3885 /* 3886 * Some architectures have vcpu ioctls that are asynchronous to vcpu 3887 * execution; mutex_lock() would break them. 3888 */ 3889 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 3890 if (r != -ENOIOCTLCMD) 3891 return r; 3892 3893 if (mutex_lock_killable(&vcpu->mutex)) 3894 return -EINTR; 3895 switch (ioctl) { 3896 case KVM_RUN: { 3897 struct pid *oldpid; 3898 r = -EINVAL; 3899 if (arg) 3900 goto out; 3901 oldpid = rcu_access_pointer(vcpu->pid); 3902 if (unlikely(oldpid != task_pid(current))) { 3903 /* The thread running this VCPU changed. 
*/ 3904 struct pid *newpid; 3905 3906 r = kvm_arch_vcpu_run_pid_change(vcpu); 3907 if (r) 3908 break; 3909 3910 newpid = get_task_pid(current, PIDTYPE_PID); 3911 rcu_assign_pointer(vcpu->pid, newpid); 3912 if (oldpid) 3913 synchronize_rcu(); 3914 put_pid(oldpid); 3915 } 3916 r = kvm_arch_vcpu_ioctl_run(vcpu); 3917 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 3918 break; 3919 } 3920 case KVM_GET_REGS: { 3921 struct kvm_regs *kvm_regs; 3922 3923 r = -ENOMEM; 3924 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 3925 if (!kvm_regs) 3926 goto out; 3927 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 3928 if (r) 3929 goto out_free1; 3930 r = -EFAULT; 3931 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 3932 goto out_free1; 3933 r = 0; 3934 out_free1: 3935 kfree(kvm_regs); 3936 break; 3937 } 3938 case KVM_SET_REGS: { 3939 struct kvm_regs *kvm_regs; 3940 3941 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 3942 if (IS_ERR(kvm_regs)) { 3943 r = PTR_ERR(kvm_regs); 3944 goto out; 3945 } 3946 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 3947 kfree(kvm_regs); 3948 break; 3949 } 3950 case KVM_GET_SREGS: { 3951 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 3952 GFP_KERNEL_ACCOUNT); 3953 r = -ENOMEM; 3954 if (!kvm_sregs) 3955 goto out; 3956 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 3957 if (r) 3958 goto out; 3959 r = -EFAULT; 3960 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 3961 goto out; 3962 r = 0; 3963 break; 3964 } 3965 case KVM_SET_SREGS: { 3966 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 3967 if (IS_ERR(kvm_sregs)) { 3968 r = PTR_ERR(kvm_sregs); 3969 kvm_sregs = NULL; 3970 goto out; 3971 } 3972 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 3973 break; 3974 } 3975 case KVM_GET_MP_STATE: { 3976 struct kvm_mp_state mp_state; 3977 3978 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 3979 if (r) 3980 goto out; 3981 r = -EFAULT; 3982 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 3983 goto out; 3984 r = 0; 3985 break; 3986 } 3987 case KVM_SET_MP_STATE: { 3988 struct kvm_mp_state mp_state; 3989 3990 r = -EFAULT; 3991 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 3992 goto out; 3993 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 3994 break; 3995 } 3996 case KVM_TRANSLATE: { 3997 struct kvm_translation tr; 3998 3999 r = -EFAULT; 4000 if (copy_from_user(&tr, argp, sizeof(tr))) 4001 goto out; 4002 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 4003 if (r) 4004 goto out; 4005 r = -EFAULT; 4006 if (copy_to_user(argp, &tr, sizeof(tr))) 4007 goto out; 4008 r = 0; 4009 break; 4010 } 4011 case KVM_SET_GUEST_DEBUG: { 4012 struct kvm_guest_debug dbg; 4013 4014 r = -EFAULT; 4015 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4016 goto out; 4017 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4018 break; 4019 } 4020 case KVM_SET_SIGNAL_MASK: { 4021 struct kvm_signal_mask __user *sigmask_arg = argp; 4022 struct kvm_signal_mask kvm_sigmask; 4023 sigset_t sigset, *p; 4024 4025 p = NULL; 4026 if (argp) { 4027 r = -EFAULT; 4028 if (copy_from_user(&kvm_sigmask, argp, 4029 sizeof(kvm_sigmask))) 4030 goto out; 4031 r = -EINVAL; 4032 if (kvm_sigmask.len != sizeof(sigset)) 4033 goto out; 4034 r = -EFAULT; 4035 if (copy_from_user(&sigset, sigmask_arg->sigset, 4036 sizeof(sigset))) 4037 goto out; 4038 p = &sigset; 4039 } 4040 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4041 break; 4042 } 4043 case KVM_GET_FPU: { 4044 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4045 r = -ENOMEM; 4046 if (!fpu) 4047 goto out; 
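		/*
		 * Like struct kvm_sregs in the cases above, struct kvm_fpu is
		 * several hundred bytes, so it is heap-allocated rather than
		 * placed on the kernel stack.  Both pointers are released on
		 * the shared "out:" path at the end of this function; since
		 * kfree(NULL) is a no-op, that common cleanup is safe no
		 * matter which ioctl was handled.
		 */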
4048 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4049 if (r) 4050 goto out; 4051 r = -EFAULT; 4052 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4053 goto out; 4054 r = 0; 4055 break; 4056 } 4057 case KVM_SET_FPU: { 4058 fpu = memdup_user(argp, sizeof(*fpu)); 4059 if (IS_ERR(fpu)) { 4060 r = PTR_ERR(fpu); 4061 fpu = NULL; 4062 goto out; 4063 } 4064 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4065 break; 4066 } 4067 case KVM_GET_STATS_FD: { 4068 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4069 break; 4070 } 4071 default: 4072 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4073 } 4074 out: 4075 mutex_unlock(&vcpu->mutex); 4076 kfree(fpu); 4077 kfree(kvm_sregs); 4078 return r; 4079 } 4080 4081 #ifdef CONFIG_KVM_COMPAT 4082 static long kvm_vcpu_compat_ioctl(struct file *filp, 4083 unsigned int ioctl, unsigned long arg) 4084 { 4085 struct kvm_vcpu *vcpu = filp->private_data; 4086 void __user *argp = compat_ptr(arg); 4087 int r; 4088 4089 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4090 return -EIO; 4091 4092 switch (ioctl) { 4093 case KVM_SET_SIGNAL_MASK: { 4094 struct kvm_signal_mask __user *sigmask_arg = argp; 4095 struct kvm_signal_mask kvm_sigmask; 4096 sigset_t sigset; 4097 4098 if (argp) { 4099 r = -EFAULT; 4100 if (copy_from_user(&kvm_sigmask, argp, 4101 sizeof(kvm_sigmask))) 4102 goto out; 4103 r = -EINVAL; 4104 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4105 goto out; 4106 r = -EFAULT; 4107 if (get_compat_sigset(&sigset, 4108 (compat_sigset_t __user *)sigmask_arg->sigset)) 4109 goto out; 4110 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4111 } else 4112 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4113 break; 4114 } 4115 default: 4116 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4117 } 4118 4119 out: 4120 return r; 4121 } 4122 #endif 4123 4124 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4125 { 4126 struct kvm_device *dev = filp->private_data; 4127 4128 if (dev->ops->mmap) 4129 return dev->ops->mmap(dev, vma); 4130 4131 return -ENODEV; 4132 } 4133 4134 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4135 int (*accessor)(struct kvm_device *dev, 4136 struct kvm_device_attr *attr), 4137 unsigned long arg) 4138 { 4139 struct kvm_device_attr attr; 4140 4141 if (!accessor) 4142 return -EPERM; 4143 4144 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4145 return -EFAULT; 4146 4147 return accessor(dev, &attr); 4148 } 4149 4150 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4151 unsigned long arg) 4152 { 4153 struct kvm_device *dev = filp->private_data; 4154 4155 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4156 return -EIO; 4157 4158 switch (ioctl) { 4159 case KVM_SET_DEVICE_ATTR: 4160 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4161 case KVM_GET_DEVICE_ATTR: 4162 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4163 case KVM_HAS_DEVICE_ATTR: 4164 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4165 default: 4166 if (dev->ops->ioctl) 4167 return dev->ops->ioctl(dev, ioctl, arg); 4168 4169 return -ENOTTY; 4170 } 4171 } 4172 4173 static int kvm_device_release(struct inode *inode, struct file *filp) 4174 { 4175 struct kvm_device *dev = filp->private_data; 4176 struct kvm *kvm = dev->kvm; 4177 4178 if (dev->ops->release) { 4179 mutex_lock(&kvm->lock); 4180 list_del(&dev->vm_node); 4181 dev->ops->release(dev); 4182 mutex_unlock(&kvm->lock); 4183 } 4184 4185 kvm_put_kvm(kvm); 4186 return 0; 4187 } 4188 4189 static const struct file_operations kvm_device_fops = { 4190 
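	/*
	 * File operations for device fds returned by KVM_CREATE_DEVICE.
	 * Such an fd pins its VM: kvm_ioctl_create_device() takes a
	 * reference with kvm_get_kvm() before installing the fd, and
	 * kvm_device_release() drops it again when the fd is closed.
	 */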
.unlocked_ioctl = kvm_device_ioctl, 4191 .release = kvm_device_release, 4192 KVM_COMPAT(kvm_device_ioctl), 4193 .mmap = kvm_device_mmap, 4194 }; 4195 4196 struct kvm_device *kvm_device_from_filp(struct file *filp) 4197 { 4198 if (filp->f_op != &kvm_device_fops) 4199 return NULL; 4200 4201 return filp->private_data; 4202 } 4203 4204 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4205 #ifdef CONFIG_KVM_MPIC 4206 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4207 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4208 #endif 4209 }; 4210 4211 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4212 { 4213 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4214 return -ENOSPC; 4215 4216 if (kvm_device_ops_table[type] != NULL) 4217 return -EEXIST; 4218 4219 kvm_device_ops_table[type] = ops; 4220 return 0; 4221 } 4222 4223 void kvm_unregister_device_ops(u32 type) 4224 { 4225 if (kvm_device_ops_table[type] != NULL) 4226 kvm_device_ops_table[type] = NULL; 4227 } 4228 4229 static int kvm_ioctl_create_device(struct kvm *kvm, 4230 struct kvm_create_device *cd) 4231 { 4232 const struct kvm_device_ops *ops = NULL; 4233 struct kvm_device *dev; 4234 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4235 int type; 4236 int ret; 4237 4238 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4239 return -ENODEV; 4240 4241 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4242 ops = kvm_device_ops_table[type]; 4243 if (ops == NULL) 4244 return -ENODEV; 4245 4246 if (test) 4247 return 0; 4248 4249 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4250 if (!dev) 4251 return -ENOMEM; 4252 4253 dev->ops = ops; 4254 dev->kvm = kvm; 4255 4256 mutex_lock(&kvm->lock); 4257 ret = ops->create(dev, type); 4258 if (ret < 0) { 4259 mutex_unlock(&kvm->lock); 4260 kfree(dev); 4261 return ret; 4262 } 4263 list_add(&dev->vm_node, &kvm->devices); 4264 mutex_unlock(&kvm->lock); 4265 4266 if (ops->init) 4267 ops->init(dev); 4268 4269 kvm_get_kvm(kvm); 4270 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4271 if (ret < 0) { 4272 kvm_put_kvm_no_destroy(kvm); 4273 mutex_lock(&kvm->lock); 4274 list_del(&dev->vm_node); 4275 mutex_unlock(&kvm->lock); 4276 ops->destroy(dev); 4277 return ret; 4278 } 4279 4280 cd->fd = ret; 4281 return 0; 4282 } 4283 4284 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4285 { 4286 switch (arg) { 4287 case KVM_CAP_USER_MEMORY: 4288 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4289 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4290 case KVM_CAP_INTERNAL_ERROR_DATA: 4291 #ifdef CONFIG_HAVE_KVM_MSI 4292 case KVM_CAP_SIGNAL_MSI: 4293 #endif 4294 #ifdef CONFIG_HAVE_KVM_IRQFD 4295 case KVM_CAP_IRQFD: 4296 case KVM_CAP_IRQFD_RESAMPLE: 4297 #endif 4298 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4299 case KVM_CAP_CHECK_EXTENSION_VM: 4300 case KVM_CAP_ENABLE_CAP_VM: 4301 case KVM_CAP_HALT_POLL: 4302 return 1; 4303 #ifdef CONFIG_KVM_MMIO 4304 case KVM_CAP_COALESCED_MMIO: 4305 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4306 case KVM_CAP_COALESCED_PIO: 4307 return 1; 4308 #endif 4309 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4310 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4311 return KVM_DIRTY_LOG_MANUAL_CAPS; 4312 #endif 4313 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4314 case KVM_CAP_IRQ_ROUTING: 4315 return KVM_MAX_IRQ_ROUTES; 4316 #endif 4317 #if KVM_ADDRESS_SPACE_NUM > 1 4318 case KVM_CAP_MULTI_ADDRESS_SPACE: 4319 return KVM_ADDRESS_SPACE_NUM; 4320 #endif 4321 case KVM_CAP_NR_MEMSLOTS: 4322 return KVM_USER_MEM_SLOTS; 4323 case 
KVM_CAP_DIRTY_LOG_RING: 4324 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 4325 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4326 #else 4327 return 0; 4328 #endif 4329 case KVM_CAP_BINARY_STATS_FD: 4330 return 1; 4331 default: 4332 break; 4333 } 4334 return kvm_vm_ioctl_check_extension(kvm, arg); 4335 } 4336 4337 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4338 { 4339 int r; 4340 4341 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4342 return -EINVAL; 4343 4344 /* the size should be power of 2 */ 4345 if (!size || (size & (size - 1))) 4346 return -EINVAL; 4347 4348 /* Should be bigger to keep the reserved entries, or a page */ 4349 if (size < kvm_dirty_ring_get_rsvd_entries() * 4350 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4351 return -EINVAL; 4352 4353 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4354 sizeof(struct kvm_dirty_gfn)) 4355 return -E2BIG; 4356 4357 /* We only allow it to set once */ 4358 if (kvm->dirty_ring_size) 4359 return -EINVAL; 4360 4361 mutex_lock(&kvm->lock); 4362 4363 if (kvm->created_vcpus) { 4364 /* We don't allow to change this value after vcpu created */ 4365 r = -EINVAL; 4366 } else { 4367 kvm->dirty_ring_size = size; 4368 r = 0; 4369 } 4370 4371 mutex_unlock(&kvm->lock); 4372 return r; 4373 } 4374 4375 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4376 { 4377 unsigned long i; 4378 struct kvm_vcpu *vcpu; 4379 int cleared = 0; 4380 4381 if (!kvm->dirty_ring_size) 4382 return -EINVAL; 4383 4384 mutex_lock(&kvm->slots_lock); 4385 4386 kvm_for_each_vcpu(i, vcpu, kvm) 4387 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4388 4389 mutex_unlock(&kvm->slots_lock); 4390 4391 if (cleared) 4392 kvm_flush_remote_tlbs(kvm); 4393 4394 return cleared; 4395 } 4396 4397 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4398 struct kvm_enable_cap *cap) 4399 { 4400 return -EINVAL; 4401 } 4402 4403 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 4404 struct kvm_enable_cap *cap) 4405 { 4406 switch (cap->cap) { 4407 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4408 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 4409 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 4410 4411 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 4412 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 4413 4414 if (cap->flags || (cap->args[0] & ~allowed_options)) 4415 return -EINVAL; 4416 kvm->manual_dirty_log_protect = cap->args[0]; 4417 return 0; 4418 } 4419 #endif 4420 case KVM_CAP_HALT_POLL: { 4421 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 4422 return -EINVAL; 4423 4424 kvm->max_halt_poll_ns = cap->args[0]; 4425 return 0; 4426 } 4427 case KVM_CAP_DIRTY_LOG_RING: 4428 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4429 default: 4430 return kvm_vm_ioctl_enable_cap(kvm, cap); 4431 } 4432 } 4433 4434 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4435 size_t size, loff_t *offset) 4436 { 4437 struct kvm *kvm = file->private_data; 4438 4439 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4440 &kvm_vm_stats_desc[0], &kvm->stat, 4441 sizeof(kvm->stat), user_buffer, size, offset); 4442 } 4443 4444 static const struct file_operations kvm_vm_stats_fops = { 4445 .read = kvm_vm_stats_read, 4446 .llseek = noop_llseek, 4447 }; 4448 4449 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4450 { 4451 int fd; 4452 struct file *file; 4453 4454 fd = get_unused_fd_flags(O_CLOEXEC); 4455 if (fd < 0) 4456 return fd; 4457 4458 file = 
anon_inode_getfile("kvm-vm-stats", 4459 &kvm_vm_stats_fops, kvm, O_RDONLY); 4460 if (IS_ERR(file)) { 4461 put_unused_fd(fd); 4462 return PTR_ERR(file); 4463 } 4464 file->f_mode |= FMODE_PREAD; 4465 fd_install(fd, file); 4466 4467 return fd; 4468 } 4469 4470 static long kvm_vm_ioctl(struct file *filp, 4471 unsigned int ioctl, unsigned long arg) 4472 { 4473 struct kvm *kvm = filp->private_data; 4474 void __user *argp = (void __user *)arg; 4475 int r; 4476 4477 if (kvm->mm != current->mm || kvm->vm_dead) 4478 return -EIO; 4479 switch (ioctl) { 4480 case KVM_CREATE_VCPU: 4481 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4482 break; 4483 case KVM_ENABLE_CAP: { 4484 struct kvm_enable_cap cap; 4485 4486 r = -EFAULT; 4487 if (copy_from_user(&cap, argp, sizeof(cap))) 4488 goto out; 4489 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4490 break; 4491 } 4492 case KVM_SET_USER_MEMORY_REGION: { 4493 struct kvm_userspace_memory_region kvm_userspace_mem; 4494 4495 r = -EFAULT; 4496 if (copy_from_user(&kvm_userspace_mem, argp, 4497 sizeof(kvm_userspace_mem))) 4498 goto out; 4499 4500 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4501 break; 4502 } 4503 case KVM_GET_DIRTY_LOG: { 4504 struct kvm_dirty_log log; 4505 4506 r = -EFAULT; 4507 if (copy_from_user(&log, argp, sizeof(log))) 4508 goto out; 4509 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4510 break; 4511 } 4512 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4513 case KVM_CLEAR_DIRTY_LOG: { 4514 struct kvm_clear_dirty_log log; 4515 4516 r = -EFAULT; 4517 if (copy_from_user(&log, argp, sizeof(log))) 4518 goto out; 4519 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4520 break; 4521 } 4522 #endif 4523 #ifdef CONFIG_KVM_MMIO 4524 case KVM_REGISTER_COALESCED_MMIO: { 4525 struct kvm_coalesced_mmio_zone zone; 4526 4527 r = -EFAULT; 4528 if (copy_from_user(&zone, argp, sizeof(zone))) 4529 goto out; 4530 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4531 break; 4532 } 4533 case KVM_UNREGISTER_COALESCED_MMIO: { 4534 struct kvm_coalesced_mmio_zone zone; 4535 4536 r = -EFAULT; 4537 if (copy_from_user(&zone, argp, sizeof(zone))) 4538 goto out; 4539 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4540 break; 4541 } 4542 #endif 4543 case KVM_IRQFD: { 4544 struct kvm_irqfd data; 4545 4546 r = -EFAULT; 4547 if (copy_from_user(&data, argp, sizeof(data))) 4548 goto out; 4549 r = kvm_irqfd(kvm, &data); 4550 break; 4551 } 4552 case KVM_IOEVENTFD: { 4553 struct kvm_ioeventfd data; 4554 4555 r = -EFAULT; 4556 if (copy_from_user(&data, argp, sizeof(data))) 4557 goto out; 4558 r = kvm_ioeventfd(kvm, &data); 4559 break; 4560 } 4561 #ifdef CONFIG_HAVE_KVM_MSI 4562 case KVM_SIGNAL_MSI: { 4563 struct kvm_msi msi; 4564 4565 r = -EFAULT; 4566 if (copy_from_user(&msi, argp, sizeof(msi))) 4567 goto out; 4568 r = kvm_send_userspace_msi(kvm, &msi); 4569 break; 4570 } 4571 #endif 4572 #ifdef __KVM_HAVE_IRQ_LINE 4573 case KVM_IRQ_LINE_STATUS: 4574 case KVM_IRQ_LINE: { 4575 struct kvm_irq_level irq_event; 4576 4577 r = -EFAULT; 4578 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4579 goto out; 4580 4581 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4582 ioctl == KVM_IRQ_LINE_STATUS); 4583 if (r) 4584 goto out; 4585 4586 r = -EFAULT; 4587 if (ioctl == KVM_IRQ_LINE_STATUS) { 4588 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4589 goto out; 4590 } 4591 4592 r = 0; 4593 break; 4594 } 4595 #endif 4596 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4597 case KVM_SET_GSI_ROUTING: { 4598 struct kvm_irq_routing routing; 4599 struct kvm_irq_routing __user *urouting; 
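		/*
		 * Layout expected from userspace: a struct kvm_irq_routing
		 * header (nr and flags, where flags must currently be zero)
		 * immediately followed by routing.nr kvm_irq_routing_entry
		 * records.  The header is copied first, then the flexible
		 * array of entries is duplicated with vmemdup_user() below.
		 * A purely illustrative userspace sketch, with a hypothetical
		 * vm_fd and no error handling:
		 *
		 *	struct kvm_irq_routing *r;
		 *	struct kvm_irq_routing_entry *e;
		 *
		 *	r = calloc(1, sizeof(*r) + sizeof(*e));
		 *	r->nr = 1;
		 *	e = &r->entries[0];
		 *	e->gsi = 0;
		 *	e->type = KVM_IRQ_ROUTING_IRQCHIP;
		 *	e->u.irqchip.irqchip = 0;
		 *	e->u.irqchip.pin = 0;
		 *	ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
		 */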
4600 struct kvm_irq_routing_entry *entries = NULL; 4601 4602 r = -EFAULT; 4603 if (copy_from_user(&routing, argp, sizeof(routing))) 4604 goto out; 4605 r = -EINVAL; 4606 if (!kvm_arch_can_set_irq_routing(kvm)) 4607 goto out; 4608 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4609 goto out; 4610 if (routing.flags) 4611 goto out; 4612 if (routing.nr) { 4613 urouting = argp; 4614 entries = vmemdup_user(urouting->entries, 4615 array_size(sizeof(*entries), 4616 routing.nr)); 4617 if (IS_ERR(entries)) { 4618 r = PTR_ERR(entries); 4619 goto out; 4620 } 4621 } 4622 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4623 routing.flags); 4624 kvfree(entries); 4625 break; 4626 } 4627 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4628 case KVM_CREATE_DEVICE: { 4629 struct kvm_create_device cd; 4630 4631 r = -EFAULT; 4632 if (copy_from_user(&cd, argp, sizeof(cd))) 4633 goto out; 4634 4635 r = kvm_ioctl_create_device(kvm, &cd); 4636 if (r) 4637 goto out; 4638 4639 r = -EFAULT; 4640 if (copy_to_user(argp, &cd, sizeof(cd))) 4641 goto out; 4642 4643 r = 0; 4644 break; 4645 } 4646 case KVM_CHECK_EXTENSION: 4647 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4648 break; 4649 case KVM_RESET_DIRTY_RINGS: 4650 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4651 break; 4652 case KVM_GET_STATS_FD: 4653 r = kvm_vm_ioctl_get_stats_fd(kvm); 4654 break; 4655 default: 4656 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4657 } 4658 out: 4659 return r; 4660 } 4661 4662 #ifdef CONFIG_KVM_COMPAT 4663 struct compat_kvm_dirty_log { 4664 __u32 slot; 4665 __u32 padding1; 4666 union { 4667 compat_uptr_t dirty_bitmap; /* one bit per page */ 4668 __u64 padding2; 4669 }; 4670 }; 4671 4672 struct compat_kvm_clear_dirty_log { 4673 __u32 slot; 4674 __u32 num_pages; 4675 __u64 first_page; 4676 union { 4677 compat_uptr_t dirty_bitmap; /* one bit per page */ 4678 __u64 padding2; 4679 }; 4680 }; 4681 4682 static long kvm_vm_compat_ioctl(struct file *filp, 4683 unsigned int ioctl, unsigned long arg) 4684 { 4685 struct kvm *kvm = filp->private_data; 4686 int r; 4687 4688 if (kvm->mm != current->mm || kvm->vm_dead) 4689 return -EIO; 4690 switch (ioctl) { 4691 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4692 case KVM_CLEAR_DIRTY_LOG: { 4693 struct compat_kvm_clear_dirty_log compat_log; 4694 struct kvm_clear_dirty_log log; 4695 4696 if (copy_from_user(&compat_log, (void __user *)arg, 4697 sizeof(compat_log))) 4698 return -EFAULT; 4699 log.slot = compat_log.slot; 4700 log.num_pages = compat_log.num_pages; 4701 log.first_page = compat_log.first_page; 4702 log.padding2 = compat_log.padding2; 4703 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4704 4705 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4706 break; 4707 } 4708 #endif 4709 case KVM_GET_DIRTY_LOG: { 4710 struct compat_kvm_dirty_log compat_log; 4711 struct kvm_dirty_log log; 4712 4713 if (copy_from_user(&compat_log, (void __user *)arg, 4714 sizeof(compat_log))) 4715 return -EFAULT; 4716 log.slot = compat_log.slot; 4717 log.padding1 = compat_log.padding1; 4718 log.padding2 = compat_log.padding2; 4719 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4720 4721 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4722 break; 4723 } 4724 default: 4725 r = kvm_vm_ioctl(filp, ioctl, arg); 4726 } 4727 return r; 4728 } 4729 #endif 4730 4731 static const struct file_operations kvm_vm_fops = { 4732 .release = kvm_vm_release, 4733 .unlocked_ioctl = kvm_vm_ioctl, 4734 .llseek = noop_llseek, 4735 KVM_COMPAT(kvm_vm_compat_ioctl), 4736 }; 4737 4738 bool file_is_kvm(struct file *file) 4739 { 4740 return file && 
file->f_op == &kvm_vm_fops; 4741 } 4742 EXPORT_SYMBOL_GPL(file_is_kvm); 4743 4744 static int kvm_dev_ioctl_create_vm(unsigned long type) 4745 { 4746 int r; 4747 struct kvm *kvm; 4748 struct file *file; 4749 4750 kvm = kvm_create_vm(type); 4751 if (IS_ERR(kvm)) 4752 return PTR_ERR(kvm); 4753 #ifdef CONFIG_KVM_MMIO 4754 r = kvm_coalesced_mmio_init(kvm); 4755 if (r < 0) 4756 goto put_kvm; 4757 #endif 4758 r = get_unused_fd_flags(O_CLOEXEC); 4759 if (r < 0) 4760 goto put_kvm; 4761 4762 snprintf(kvm->stats_id, sizeof(kvm->stats_id), 4763 "kvm-%d", task_pid_nr(current)); 4764 4765 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4766 if (IS_ERR(file)) { 4767 put_unused_fd(r); 4768 r = PTR_ERR(file); 4769 goto put_kvm; 4770 } 4771 4772 /* 4773 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4774 * already set, with ->release() being kvm_vm_release(). In error 4775 * cases it will be called by the final fput(file) and will take 4776 * care of doing kvm_put_kvm(kvm). 4777 */ 4778 if (kvm_create_vm_debugfs(kvm, r) < 0) { 4779 put_unused_fd(r); 4780 fput(file); 4781 return -ENOMEM; 4782 } 4783 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4784 4785 fd_install(r, file); 4786 return r; 4787 4788 put_kvm: 4789 kvm_put_kvm(kvm); 4790 return r; 4791 } 4792 4793 static long kvm_dev_ioctl(struct file *filp, 4794 unsigned int ioctl, unsigned long arg) 4795 { 4796 long r = -EINVAL; 4797 4798 switch (ioctl) { 4799 case KVM_GET_API_VERSION: 4800 if (arg) 4801 goto out; 4802 r = KVM_API_VERSION; 4803 break; 4804 case KVM_CREATE_VM: 4805 r = kvm_dev_ioctl_create_vm(arg); 4806 break; 4807 case KVM_CHECK_EXTENSION: 4808 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4809 break; 4810 case KVM_GET_VCPU_MMAP_SIZE: 4811 if (arg) 4812 goto out; 4813 r = PAGE_SIZE; /* struct kvm_run */ 4814 #ifdef CONFIG_X86 4815 r += PAGE_SIZE; /* pio data page */ 4816 #endif 4817 #ifdef CONFIG_KVM_MMIO 4818 r += PAGE_SIZE; /* coalesced mmio ring page */ 4819 #endif 4820 break; 4821 case KVM_TRACE_ENABLE: 4822 case KVM_TRACE_PAUSE: 4823 case KVM_TRACE_DISABLE: 4824 r = -EOPNOTSUPP; 4825 break; 4826 default: 4827 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4828 } 4829 out: 4830 return r; 4831 } 4832 4833 static struct file_operations kvm_chardev_ops = { 4834 .unlocked_ioctl = kvm_dev_ioctl, 4835 .llseek = noop_llseek, 4836 KVM_COMPAT(kvm_dev_ioctl), 4837 }; 4838 4839 static struct miscdevice kvm_dev = { 4840 KVM_MINOR, 4841 "kvm", 4842 &kvm_chardev_ops, 4843 }; 4844 4845 static void hardware_enable_nolock(void *junk) 4846 { 4847 int cpu = raw_smp_processor_id(); 4848 int r; 4849 4850 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4851 return; 4852 4853 cpumask_set_cpu(cpu, cpus_hardware_enabled); 4854 4855 r = kvm_arch_hardware_enable(); 4856 4857 if (r) { 4858 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4859 atomic_inc(&hardware_enable_failed); 4860 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 4861 } 4862 } 4863 4864 static int kvm_starting_cpu(unsigned int cpu) 4865 { 4866 raw_spin_lock(&kvm_count_lock); 4867 if (kvm_usage_count) 4868 hardware_enable_nolock(NULL); 4869 raw_spin_unlock(&kvm_count_lock); 4870 return 0; 4871 } 4872 4873 static void hardware_disable_nolock(void *junk) 4874 { 4875 int cpu = raw_smp_processor_id(); 4876 4877 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4878 return; 4879 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4880 kvm_arch_hardware_disable(); 4881 } 4882 4883 static int kvm_dying_cpu(unsigned int cpu) 4884 { 4885 
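	/*
	 * Mirror of kvm_starting_cpu(): both are CPUHP_AP_KVM_STARTING
	 * callbacks that run on the CPU being brought up or torn down, in
	 * atomic context, which is why kvm_count_lock is a raw spinlock.
	 * Hardware virtualization is only touched while kvm_usage_count is
	 * non-zero, i.e. while at least one VM holds a reference taken in
	 * hardware_enable_all().
	 */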
raw_spin_lock(&kvm_count_lock); 4886 if (kvm_usage_count) 4887 hardware_disable_nolock(NULL); 4888 raw_spin_unlock(&kvm_count_lock); 4889 return 0; 4890 } 4891 4892 static void hardware_disable_all_nolock(void) 4893 { 4894 BUG_ON(!kvm_usage_count); 4895 4896 kvm_usage_count--; 4897 if (!kvm_usage_count) 4898 on_each_cpu(hardware_disable_nolock, NULL, 1); 4899 } 4900 4901 static void hardware_disable_all(void) 4902 { 4903 raw_spin_lock(&kvm_count_lock); 4904 hardware_disable_all_nolock(); 4905 raw_spin_unlock(&kvm_count_lock); 4906 } 4907 4908 static int hardware_enable_all(void) 4909 { 4910 int r = 0; 4911 4912 raw_spin_lock(&kvm_count_lock); 4913 4914 kvm_usage_count++; 4915 if (kvm_usage_count == 1) { 4916 atomic_set(&hardware_enable_failed, 0); 4917 on_each_cpu(hardware_enable_nolock, NULL, 1); 4918 4919 if (atomic_read(&hardware_enable_failed)) { 4920 hardware_disable_all_nolock(); 4921 r = -EBUSY; 4922 } 4923 } 4924 4925 raw_spin_unlock(&kvm_count_lock); 4926 4927 return r; 4928 } 4929 4930 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 4931 void *v) 4932 { 4933 /* 4934 * Some (well, at least mine) BIOSes hang on reboot if 4935 * in vmx root mode. 4936 * 4937 * And Intel TXT required VMX off for all cpu when system shutdown. 4938 */ 4939 pr_info("kvm: exiting hardware virtualization\n"); 4940 kvm_rebooting = true; 4941 on_each_cpu(hardware_disable_nolock, NULL, 1); 4942 return NOTIFY_OK; 4943 } 4944 4945 static struct notifier_block kvm_reboot_notifier = { 4946 .notifier_call = kvm_reboot, 4947 .priority = 0, 4948 }; 4949 4950 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 4951 { 4952 int i; 4953 4954 for (i = 0; i < bus->dev_count; i++) { 4955 struct kvm_io_device *pos = bus->range[i].dev; 4956 4957 kvm_iodevice_destructor(pos); 4958 } 4959 kfree(bus); 4960 } 4961 4962 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 4963 const struct kvm_io_range *r2) 4964 { 4965 gpa_t addr1 = r1->addr; 4966 gpa_t addr2 = r2->addr; 4967 4968 if (addr1 < addr2) 4969 return -1; 4970 4971 /* If r2->len == 0, match the exact address. If r2->len != 0, 4972 * accept any overlapping write. Any order is acceptable for 4973 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 4974 * we process all of them. 
4975 */ 4976 if (r2->len) { 4977 addr1 += r1->len; 4978 addr2 += r2->len; 4979 } 4980 4981 if (addr1 > addr2) 4982 return 1; 4983 4984 return 0; 4985 } 4986 4987 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 4988 { 4989 return kvm_io_bus_cmp(p1, p2); 4990 } 4991 4992 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 4993 gpa_t addr, int len) 4994 { 4995 struct kvm_io_range *range, key; 4996 int off; 4997 4998 key = (struct kvm_io_range) { 4999 .addr = addr, 5000 .len = len, 5001 }; 5002 5003 range = bsearch(&key, bus->range, bus->dev_count, 5004 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 5005 if (range == NULL) 5006 return -ENOENT; 5007 5008 off = range - bus->range; 5009 5010 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5011 off--; 5012 5013 return off; 5014 } 5015 5016 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5017 struct kvm_io_range *range, const void *val) 5018 { 5019 int idx; 5020 5021 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5022 if (idx < 0) 5023 return -EOPNOTSUPP; 5024 5025 while (idx < bus->dev_count && 5026 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5027 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5028 range->len, val)) 5029 return idx; 5030 idx++; 5031 } 5032 5033 return -EOPNOTSUPP; 5034 } 5035 5036 /* kvm_io_bus_write - called under kvm->slots_lock */ 5037 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5038 int len, const void *val) 5039 { 5040 struct kvm_io_bus *bus; 5041 struct kvm_io_range range; 5042 int r; 5043 5044 range = (struct kvm_io_range) { 5045 .addr = addr, 5046 .len = len, 5047 }; 5048 5049 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5050 if (!bus) 5051 return -ENOMEM; 5052 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5053 return r < 0 ? r : 0; 5054 } 5055 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5056 5057 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 5058 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5059 gpa_t addr, int len, const void *val, long cookie) 5060 { 5061 struct kvm_io_bus *bus; 5062 struct kvm_io_range range; 5063 5064 range = (struct kvm_io_range) { 5065 .addr = addr, 5066 .len = len, 5067 }; 5068 5069 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5070 if (!bus) 5071 return -ENOMEM; 5072 5073 /* First try the device referenced by cookie. */ 5074 if ((cookie >= 0) && (cookie < bus->dev_count) && 5075 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5076 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5077 val)) 5078 return cookie; 5079 5080 /* 5081 * cookie contained garbage; fall back to search and return the 5082 * correct cookie value. 
5083 */ 5084 return __kvm_io_bus_write(vcpu, bus, &range, val); 5085 } 5086 5087 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5088 struct kvm_io_range *range, void *val) 5089 { 5090 int idx; 5091 5092 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5093 if (idx < 0) 5094 return -EOPNOTSUPP; 5095 5096 while (idx < bus->dev_count && 5097 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5098 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5099 range->len, val)) 5100 return idx; 5101 idx++; 5102 } 5103 5104 return -EOPNOTSUPP; 5105 } 5106 5107 /* kvm_io_bus_read - called under kvm->slots_lock */ 5108 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5109 int len, void *val) 5110 { 5111 struct kvm_io_bus *bus; 5112 struct kvm_io_range range; 5113 int r; 5114 5115 range = (struct kvm_io_range) { 5116 .addr = addr, 5117 .len = len, 5118 }; 5119 5120 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5121 if (!bus) 5122 return -ENOMEM; 5123 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5124 return r < 0 ? r : 0; 5125 } 5126 5127 /* Caller must hold slots_lock. */ 5128 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5129 int len, struct kvm_io_device *dev) 5130 { 5131 int i; 5132 struct kvm_io_bus *new_bus, *bus; 5133 struct kvm_io_range range; 5134 5135 bus = kvm_get_bus(kvm, bus_idx); 5136 if (!bus) 5137 return -ENOMEM; 5138 5139 /* exclude ioeventfd which is limited by maximum fd */ 5140 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5141 return -ENOSPC; 5142 5143 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5144 GFP_KERNEL_ACCOUNT); 5145 if (!new_bus) 5146 return -ENOMEM; 5147 5148 range = (struct kvm_io_range) { 5149 .addr = addr, 5150 .len = len, 5151 .dev = dev, 5152 }; 5153 5154 for (i = 0; i < bus->dev_count; i++) 5155 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5156 break; 5157 5158 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5159 new_bus->dev_count++; 5160 new_bus->range[i] = range; 5161 memcpy(new_bus->range + i + 1, bus->range + i, 5162 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5163 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5164 synchronize_srcu_expedited(&kvm->srcu); 5165 kfree(bus); 5166 5167 return 0; 5168 } 5169 5170 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5171 struct kvm_io_device *dev) 5172 { 5173 int i, j; 5174 struct kvm_io_bus *new_bus, *bus; 5175 5176 lockdep_assert_held(&kvm->slots_lock); 5177 5178 bus = kvm_get_bus(kvm, bus_idx); 5179 if (!bus) 5180 return 0; 5181 5182 for (i = 0; i < bus->dev_count; i++) { 5183 if (bus->range[i].dev == dev) { 5184 break; 5185 } 5186 } 5187 5188 if (i == bus->dev_count) 5189 return 0; 5190 5191 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5192 GFP_KERNEL_ACCOUNT); 5193 if (new_bus) { 5194 memcpy(new_bus, bus, struct_size(bus, range, i)); 5195 new_bus->dev_count--; 5196 memcpy(new_bus->range + i, bus->range + i + 1, 5197 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5198 } 5199 5200 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5201 synchronize_srcu_expedited(&kvm->srcu); 5202 5203 /* Destroy the old bus _after_ installing the (null) bus. 
*/ 5204 if (!new_bus) { 5205 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5206 for (j = 0; j < bus->dev_count; j++) { 5207 if (j == i) 5208 continue; 5209 kvm_iodevice_destructor(bus->range[j].dev); 5210 } 5211 } 5212 5213 kfree(bus); 5214 return new_bus ? 0 : -ENOMEM; 5215 } 5216 5217 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5218 gpa_t addr) 5219 { 5220 struct kvm_io_bus *bus; 5221 int dev_idx, srcu_idx; 5222 struct kvm_io_device *iodev = NULL; 5223 5224 srcu_idx = srcu_read_lock(&kvm->srcu); 5225 5226 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5227 if (!bus) 5228 goto out_unlock; 5229 5230 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5231 if (dev_idx < 0) 5232 goto out_unlock; 5233 5234 iodev = bus->range[dev_idx].dev; 5235 5236 out_unlock: 5237 srcu_read_unlock(&kvm->srcu, srcu_idx); 5238 5239 return iodev; 5240 } 5241 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5242 5243 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5244 int (*get)(void *, u64 *), int (*set)(void *, u64), 5245 const char *fmt) 5246 { 5247 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5248 inode->i_private; 5249 5250 /* 5251 * The debugfs files are a reference to the kvm struct which 5252 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe 5253 * avoids the race between open and the removal of the debugfs directory. 5254 */ 5255 if (!kvm_get_kvm_safe(stat_data->kvm)) 5256 return -ENOENT; 5257 5258 if (simple_attr_open(inode, file, get, 5259 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5260 ? set : NULL, 5261 fmt)) { 5262 kvm_put_kvm(stat_data->kvm); 5263 return -ENOMEM; 5264 } 5265 5266 return 0; 5267 } 5268 5269 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5270 { 5271 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 5272 inode->i_private; 5273 5274 simple_attr_release(inode, file); 5275 kvm_put_kvm(stat_data->kvm); 5276 5277 return 0; 5278 } 5279 5280 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5281 { 5282 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5283 5284 return 0; 5285 } 5286 5287 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5288 { 5289 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5290 5291 return 0; 5292 } 5293 5294 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5295 { 5296 unsigned long i; 5297 struct kvm_vcpu *vcpu; 5298 5299 *val = 0; 5300 5301 kvm_for_each_vcpu(i, vcpu, kvm) 5302 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5303 5304 return 0; 5305 } 5306 5307 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5308 { 5309 unsigned long i; 5310 struct kvm_vcpu *vcpu; 5311 5312 kvm_for_each_vcpu(i, vcpu, kvm) 5313 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5314 5315 return 0; 5316 } 5317 5318 static int kvm_stat_data_get(void *data, u64 *val) 5319 { 5320 int r = -EFAULT; 5321 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5322 5323 switch (stat_data->kind) { 5324 case KVM_STAT_VM: 5325 r = kvm_get_stat_per_vm(stat_data->kvm, 5326 stat_data->desc->desc.offset, val); 5327 break; 5328 case KVM_STAT_VCPU: 5329 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5330 stat_data->desc->desc.offset, val); 5331 break; 5332 } 5333 5334 return r; 5335 } 5336 5337 static int kvm_stat_data_clear(void *data, u64 val) 5338 { 5339 int r = -EFAULT; 5340 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5341 5342 if (val) 5343 return -EINVAL; 5344 
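	/*
	 * Only a write of "0" reaches this point; any other value was
	 * rejected above.  Depending on the descriptor kind, the reset is
	 * applied to the VM-wide counter or to the counter in every vCPU of
	 * this VM.  These are the writable per-VM debugfs stat files, so a
	 * reset can be triggered with something like (path and stat name
	 * illustrative):
	 *
	 *	echo 0 > /sys/kernel/debug/kvm/<vm>/halt_exits
	 */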
5345 switch (stat_data->kind) { 5346 case KVM_STAT_VM: 5347 r = kvm_clear_stat_per_vm(stat_data->kvm, 5348 stat_data->desc->desc.offset); 5349 break; 5350 case KVM_STAT_VCPU: 5351 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5352 stat_data->desc->desc.offset); 5353 break; 5354 } 5355 5356 return r; 5357 } 5358 5359 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5360 { 5361 __simple_attr_check_format("%llu\n", 0ull); 5362 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5363 kvm_stat_data_clear, "%llu\n"); 5364 } 5365 5366 static const struct file_operations stat_fops_per_vm = { 5367 .owner = THIS_MODULE, 5368 .open = kvm_stat_data_open, 5369 .release = kvm_debugfs_release, 5370 .read = simple_attr_read, 5371 .write = simple_attr_write, 5372 .llseek = no_llseek, 5373 }; 5374 5375 static int vm_stat_get(void *_offset, u64 *val) 5376 { 5377 unsigned offset = (long)_offset; 5378 struct kvm *kvm; 5379 u64 tmp_val; 5380 5381 *val = 0; 5382 mutex_lock(&kvm_lock); 5383 list_for_each_entry(kvm, &vm_list, vm_list) { 5384 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5385 *val += tmp_val; 5386 } 5387 mutex_unlock(&kvm_lock); 5388 return 0; 5389 } 5390 5391 static int vm_stat_clear(void *_offset, u64 val) 5392 { 5393 unsigned offset = (long)_offset; 5394 struct kvm *kvm; 5395 5396 if (val) 5397 return -EINVAL; 5398 5399 mutex_lock(&kvm_lock); 5400 list_for_each_entry(kvm, &vm_list, vm_list) { 5401 kvm_clear_stat_per_vm(kvm, offset); 5402 } 5403 mutex_unlock(&kvm_lock); 5404 5405 return 0; 5406 } 5407 5408 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5409 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5410 5411 static int vcpu_stat_get(void *_offset, u64 *val) 5412 { 5413 unsigned offset = (long)_offset; 5414 struct kvm *kvm; 5415 u64 tmp_val; 5416 5417 *val = 0; 5418 mutex_lock(&kvm_lock); 5419 list_for_each_entry(kvm, &vm_list, vm_list) { 5420 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5421 *val += tmp_val; 5422 } 5423 mutex_unlock(&kvm_lock); 5424 return 0; 5425 } 5426 5427 static int vcpu_stat_clear(void *_offset, u64 val) 5428 { 5429 unsigned offset = (long)_offset; 5430 struct kvm *kvm; 5431 5432 if (val) 5433 return -EINVAL; 5434 5435 mutex_lock(&kvm_lock); 5436 list_for_each_entry(kvm, &vm_list, vm_list) { 5437 kvm_clear_stat_per_vcpu(kvm, offset); 5438 } 5439 mutex_unlock(&kvm_lock); 5440 5441 return 0; 5442 } 5443 5444 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5445 "%llu\n"); 5446 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5447 5448 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5449 { 5450 struct kobj_uevent_env *env; 5451 unsigned long long created, active; 5452 5453 if (!kvm_dev.this_device || !kvm) 5454 return; 5455 5456 mutex_lock(&kvm_lock); 5457 if (type == KVM_EVENT_CREATE_VM) { 5458 kvm_createvm_count++; 5459 kvm_active_vms++; 5460 } else if (type == KVM_EVENT_DESTROY_VM) { 5461 kvm_active_vms--; 5462 } 5463 created = kvm_createvm_count; 5464 active = kvm_active_vms; 5465 mutex_unlock(&kvm_lock); 5466 5467 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5468 if (!env) 5469 return; 5470 5471 add_uevent_var(env, "CREATED=%llu", created); 5472 add_uevent_var(env, "COUNT=%llu", active); 5473 5474 if (type == KVM_EVENT_CREATE_VM) { 5475 add_uevent_var(env, "EVENT=create"); 5476 kvm->userspace_pid = task_pid_nr(current); 5477 } else if (type == KVM_EVENT_DESTROY_VM) { 5478 add_uevent_var(env, 
"EVENT=destroy"); 5479 } 5480 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5481 5482 if (kvm->debugfs_dentry) { 5483 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5484 5485 if (p) { 5486 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5487 if (!IS_ERR(tmp)) 5488 add_uevent_var(env, "STATS_PATH=%s", tmp); 5489 kfree(p); 5490 } 5491 } 5492 /* no need for checks, since we are adding at most only 5 keys */ 5493 env->envp[env->envp_idx++] = NULL; 5494 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5495 kfree(env); 5496 } 5497 5498 static void kvm_init_debug(void) 5499 { 5500 const struct file_operations *fops; 5501 const struct _kvm_stats_desc *pdesc; 5502 int i; 5503 5504 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5505 5506 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5507 pdesc = &kvm_vm_stats_desc[i]; 5508 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5509 fops = &vm_stat_fops; 5510 else 5511 fops = &vm_stat_readonly_fops; 5512 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5513 kvm_debugfs_dir, 5514 (void *)(long)pdesc->desc.offset, fops); 5515 } 5516 5517 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5518 pdesc = &kvm_vcpu_stats_desc[i]; 5519 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5520 fops = &vcpu_stat_fops; 5521 else 5522 fops = &vcpu_stat_readonly_fops; 5523 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5524 kvm_debugfs_dir, 5525 (void *)(long)pdesc->desc.offset, fops); 5526 } 5527 } 5528 5529 static int kvm_suspend(void) 5530 { 5531 if (kvm_usage_count) 5532 hardware_disable_nolock(NULL); 5533 return 0; 5534 } 5535 5536 static void kvm_resume(void) 5537 { 5538 if (kvm_usage_count) { 5539 lockdep_assert_not_held(&kvm_count_lock); 5540 hardware_enable_nolock(NULL); 5541 } 5542 } 5543 5544 static struct syscore_ops kvm_syscore_ops = { 5545 .suspend = kvm_suspend, 5546 .resume = kvm_resume, 5547 }; 5548 5549 static inline 5550 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5551 { 5552 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5553 } 5554 5555 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5556 { 5557 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5558 5559 WRITE_ONCE(vcpu->preempted, false); 5560 WRITE_ONCE(vcpu->ready, false); 5561 5562 __this_cpu_write(kvm_running_vcpu, vcpu); 5563 kvm_arch_sched_in(vcpu, cpu); 5564 kvm_arch_vcpu_load(vcpu, cpu); 5565 } 5566 5567 static void kvm_sched_out(struct preempt_notifier *pn, 5568 struct task_struct *next) 5569 { 5570 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5571 5572 if (current->on_rq) { 5573 WRITE_ONCE(vcpu->preempted, true); 5574 WRITE_ONCE(vcpu->ready, true); 5575 } 5576 kvm_arch_vcpu_put(vcpu); 5577 __this_cpu_write(kvm_running_vcpu, NULL); 5578 } 5579 5580 /** 5581 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5582 * 5583 * We can disable preemption locally around accessing the per-CPU variable, 5584 * and use the resolved vcpu pointer after enabling preemption again, 5585 * because even if the current thread is migrated to another CPU, reading 5586 * the per-CPU value later will give us the same value as we update the 5587 * per-CPU variable in the preempt notifier handlers. 
5588 */ 5589 struct kvm_vcpu *kvm_get_running_vcpu(void) 5590 { 5591 struct kvm_vcpu *vcpu; 5592 5593 preempt_disable(); 5594 vcpu = __this_cpu_read(kvm_running_vcpu); 5595 preempt_enable(); 5596 5597 return vcpu; 5598 } 5599 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5600 5601 /** 5602 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5603 */ 5604 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5605 { 5606 return &kvm_running_vcpu; 5607 } 5608 5609 #ifdef CONFIG_GUEST_PERF_EVENTS 5610 static unsigned int kvm_guest_state(void) 5611 { 5612 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5613 unsigned int state; 5614 5615 if (!kvm_arch_pmi_in_guest(vcpu)) 5616 return 0; 5617 5618 state = PERF_GUEST_ACTIVE; 5619 if (!kvm_arch_vcpu_in_kernel(vcpu)) 5620 state |= PERF_GUEST_USER; 5621 5622 return state; 5623 } 5624 5625 static unsigned long kvm_guest_get_ip(void) 5626 { 5627 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 5628 5629 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */ 5630 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu))) 5631 return 0; 5632 5633 return kvm_arch_vcpu_get_ip(vcpu); 5634 } 5635 5636 static struct perf_guest_info_callbacks kvm_guest_cbs = { 5637 .state = kvm_guest_state, 5638 .get_ip = kvm_guest_get_ip, 5639 .handle_intel_pt_intr = NULL, 5640 }; 5641 5642 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)) 5643 { 5644 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler; 5645 perf_register_guest_info_callbacks(&kvm_guest_cbs); 5646 } 5647 void kvm_unregister_perf_callbacks(void) 5648 { 5649 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 5650 } 5651 #endif 5652 5653 struct kvm_cpu_compat_check { 5654 void *opaque; 5655 int *ret; 5656 }; 5657 5658 static void check_processor_compat(void *data) 5659 { 5660 struct kvm_cpu_compat_check *c = data; 5661 5662 *c->ret = kvm_arch_check_processor_compat(c->opaque); 5663 } 5664 5665 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 5666 struct module *module) 5667 { 5668 struct kvm_cpu_compat_check c; 5669 int r; 5670 int cpu; 5671 5672 r = kvm_arch_init(opaque); 5673 if (r) 5674 goto out_fail; 5675 5676 /* 5677 * kvm_arch_init makes sure there's at most one caller 5678 * for architectures that support multiple implementations, 5679 * like intel and amd on x86. 5680 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 5681 * conflicts in case kvm is already setup for another implementation. 5682 */ 5683 r = kvm_irqfd_init(); 5684 if (r) 5685 goto out_irqfd; 5686 5687 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 5688 r = -ENOMEM; 5689 goto out_free_0; 5690 } 5691 5692 r = kvm_arch_hardware_setup(opaque); 5693 if (r < 0) 5694 goto out_free_1; 5695 5696 c.ret = &r; 5697 c.opaque = opaque; 5698 for_each_online_cpu(cpu) { 5699 smp_call_function_single(cpu, check_processor_compat, &c, 1); 5700 if (r < 0) 5701 goto out_free_2; 5702 } 5703 5704 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 5705 kvm_starting_cpu, kvm_dying_cpu); 5706 if (r) 5707 goto out_free_2; 5708 register_reboot_notifier(&kvm_reboot_notifier); 5709 5710 /* A kmem cache lets us meet the alignment requirements of fx_save. 
*/ 5711 if (!vcpu_align) 5712 vcpu_align = __alignof__(struct kvm_vcpu); 5713 kvm_vcpu_cache = 5714 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5715 SLAB_ACCOUNT, 5716 offsetof(struct kvm_vcpu, arch), 5717 offsetofend(struct kvm_vcpu, stats_id) 5718 - offsetof(struct kvm_vcpu, arch), 5719 NULL); 5720 if (!kvm_vcpu_cache) { 5721 r = -ENOMEM; 5722 goto out_free_3; 5723 } 5724 5725 for_each_possible_cpu(cpu) { 5726 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), 5727 GFP_KERNEL, cpu_to_node(cpu))) { 5728 r = -ENOMEM; 5729 goto out_free_4; 5730 } 5731 } 5732 5733 r = kvm_async_pf_init(); 5734 if (r) 5735 goto out_free_5; 5736 5737 kvm_chardev_ops.owner = module; 5738 5739 r = misc_register(&kvm_dev); 5740 if (r) { 5741 pr_err("kvm: misc device register failed\n"); 5742 goto out_unreg; 5743 } 5744 5745 register_syscore_ops(&kvm_syscore_ops); 5746 5747 kvm_preempt_ops.sched_in = kvm_sched_in; 5748 kvm_preempt_ops.sched_out = kvm_sched_out; 5749 5750 kvm_init_debug(); 5751 5752 r = kvm_vfio_ops_init(); 5753 WARN_ON(r); 5754 5755 return 0; 5756 5757 out_unreg: 5758 kvm_async_pf_deinit(); 5759 out_free_5: 5760 for_each_possible_cpu(cpu) 5761 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5762 out_free_4: 5763 kmem_cache_destroy(kvm_vcpu_cache); 5764 out_free_3: 5765 unregister_reboot_notifier(&kvm_reboot_notifier); 5766 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5767 out_free_2: 5768 kvm_arch_hardware_unsetup(); 5769 out_free_1: 5770 free_cpumask_var(cpus_hardware_enabled); 5771 out_free_0: 5772 kvm_irqfd_exit(); 5773 out_irqfd: 5774 kvm_arch_exit(); 5775 out_fail: 5776 return r; 5777 } 5778 EXPORT_SYMBOL_GPL(kvm_init); 5779 5780 void kvm_exit(void) 5781 { 5782 int cpu; 5783 5784 debugfs_remove_recursive(kvm_debugfs_dir); 5785 misc_deregister(&kvm_dev); 5786 for_each_possible_cpu(cpu) 5787 free_cpumask_var(per_cpu(cpu_kick_mask, cpu)); 5788 kmem_cache_destroy(kvm_vcpu_cache); 5789 kvm_async_pf_deinit(); 5790 unregister_syscore_ops(&kvm_syscore_ops); 5791 unregister_reboot_notifier(&kvm_reboot_notifier); 5792 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5793 on_each_cpu(hardware_disable_nolock, NULL, 1); 5794 kvm_arch_hardware_unsetup(); 5795 kvm_arch_exit(); 5796 kvm_irqfd_exit(); 5797 free_cpumask_var(cpus_hardware_enabled); 5798 kvm_vfio_ops_exit(); 5799 } 5800 EXPORT_SYMBOL_GPL(kvm_exit); 5801 5802 struct kvm_vm_worker_thread_context { 5803 struct kvm *kvm; 5804 struct task_struct *parent; 5805 struct completion init_done; 5806 kvm_vm_thread_fn_t thread_fn; 5807 uintptr_t data; 5808 int err; 5809 }; 5810 5811 static int kvm_vm_worker_thread(void *context) 5812 { 5813 /* 5814 * The init_context is allocated on the stack of the parent thread, so 5815 * we have to locally copy anything that is needed beyond initialization 5816 */ 5817 struct kvm_vm_worker_thread_context *init_context = context; 5818 struct task_struct *parent; 5819 struct kvm *kvm = init_context->kvm; 5820 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5821 uintptr_t data = init_context->data; 5822 int err; 5823 5824 err = kthread_park(current); 5825 /* kthread_park(current) is never supposed to return an error */ 5826 WARN_ON(err != 0); 5827 if (err) 5828 goto init_complete; 5829 5830 err = cgroup_attach_task_all(init_context->parent, current); 5831 if (err) { 5832 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5833 __func__, err); 5834 goto init_complete; 5835 } 5836 5837 set_user_nice(current, task_nice(init_context->parent)); 5838 5839 init_complete: 5840 
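	/*
	 * Hand the result back to the spawner.  init_context lives on the
	 * parent's stack and the parent may return from
	 * wait_for_completion() as soon as complete() is called, so the
	 * pointer must not be dereferenced afterwards; everything needed
	 * later (kvm, thread_fn, data) was copied to locals above and the
	 * pointer is cleared to make accidental reuse obvious.
	 */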
init_context->err = err; 5841 complete(&init_context->init_done); 5842 init_context = NULL; 5843 5844 if (err) 5845 goto out; 5846 5847 /* Wait to be woken up by the spawner before proceeding. */ 5848 kthread_parkme(); 5849 5850 if (!kthread_should_stop()) 5851 err = thread_fn(kvm, data); 5852 5853 out: 5854 /* 5855 * Move kthread back to its original cgroup to prevent it lingering in 5856 * the cgroup of the VM process, after the latter finishes its 5857 * execution. 5858 * 5859 * kthread_stop() waits on the 'exited' completion condition which is 5860 * set in exit_mm(), via mm_release(), in do_exit(). However, the 5861 * kthread is removed from the cgroup in the cgroup_exit() which is 5862 * called after the exit_mm(). This causes the kthread_stop() to return 5863 * before the kthread actually quits the cgroup. 5864 */ 5865 rcu_read_lock(); 5866 parent = rcu_dereference(current->real_parent); 5867 get_task_struct(parent); 5868 rcu_read_unlock(); 5869 cgroup_attach_task_all(parent, current); 5870 put_task_struct(parent); 5871 5872 return err; 5873 } 5874 5875 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 5876 uintptr_t data, const char *name, 5877 struct task_struct **thread_ptr) 5878 { 5879 struct kvm_vm_worker_thread_context init_context = {}; 5880 struct task_struct *thread; 5881 5882 *thread_ptr = NULL; 5883 init_context.kvm = kvm; 5884 init_context.parent = current; 5885 init_context.thread_fn = thread_fn; 5886 init_context.data = data; 5887 init_completion(&init_context.init_done); 5888 5889 thread = kthread_run(kvm_vm_worker_thread, &init_context, 5890 "%s-%d", name, task_pid_nr(current)); 5891 if (IS_ERR(thread)) 5892 return PTR_ERR(thread); 5893 5894 /* kthread_run is never supposed to return NULL */ 5895 WARN_ON(thread == NULL); 5896 5897 wait_for_completion(&init_context.init_done); 5898 5899 if (!init_context.err) 5900 *thread_ptr = thread; 5901 5902 return init_context.err; 5903 } 5904
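
/*
 * Purely illustrative userspace sketch of the fd hierarchy implemented
 * above (/dev/kvm -> VM fd -> vCPU fd).  It is not part of the kernel
 * sources, the variable names are hypothetical, and error handling, guest
 * memory (KVM_SET_USER_MEMORY_REGION) and register setup are omitted, so
 * KVM_RUN will not execute useful guest code as written:
 *
 *	#include <fcntl.h>
 *	#include <stddef.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kvm.h>
 *
 *	int main(void)
 *	{
 *		int kvm_fd, vm_fd, vcpu_fd, run_size;
 *		struct kvm_run *run;
 *
 *		kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *		if (kvm_fd < 0 ||
 *		    ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *			return 1;
 *
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *		vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 *		run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *		run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, vcpu_fd, 0);
 *		if (run == MAP_FAILED)
 *			return 1;
 *
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		// a real VMM would loop here, dispatching on run->exit_reason
 *		return 0;
 *	}
 */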