// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>


/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
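/*
 * Illustrative sketch (not part of the upstream file): KVM's grow/shrink
 * helpers scale a vCPU's poll window roughly as follows:
 *
 *	grow:   val = max(val * halt_poll_ns_grow, halt_poll_ns_grow_start),
 *	        bounded by the halt_poll_ns limit
 *	shrink: val = halt_poll_ns_shrink ? val / halt_poll_ns_shrink : 0
 */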
/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

bool kvm_is_zone_device_page(struct page *page)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (WARN_ON_ONCE(!page_count(page)))
		return false;

	return is_zone_device_page(page);
}

/*
 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 * is likely incomplete, it has been compiled purely through people wanting to
 * back guests with a certain type of memory and encountering issues.
 */
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!PageReserved(page))
		return page;

	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
	if (is_zero_pfn(pfn))
		return page;

	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (kvm_is_zone_device_page(page))
		return page;

	return NULL;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
	return true;
}
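/*
 * Illustrative sketch (not part of the upstream file) of the request/kick
 * API from both sides; the arch handler name is hypothetical:
 *
 *	// requester, e.g. after modifying page tables:
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 *	// vCPU run loop, before reentering the guest:
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		arch_flush_tlb(vcpu);	// hypothetical arch handler
 */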
280 */ 281 if (kvm_request_needs_ipi(vcpu, req)) { 282 cpu = READ_ONCE(vcpu->cpu); 283 if (cpu != -1 && cpu != current_cpu) 284 __cpumask_set_cpu(cpu, tmp); 285 } 286 } 287 288 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, 289 unsigned long *vcpu_bitmap) 290 { 291 struct kvm_vcpu *vcpu; 292 struct cpumask *cpus; 293 int i, me; 294 bool called; 295 296 me = get_cpu(); 297 298 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 299 cpumask_clear(cpus); 300 301 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) { 302 vcpu = kvm_get_vcpu(kvm, i); 303 if (!vcpu) 304 continue; 305 kvm_make_vcpu_request(vcpu, req, cpus, me); 306 } 307 308 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 309 put_cpu(); 310 311 return called; 312 } 313 314 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, 315 struct kvm_vcpu *except) 316 { 317 struct kvm_vcpu *vcpu; 318 struct cpumask *cpus; 319 unsigned long i; 320 bool called; 321 int me; 322 323 me = get_cpu(); 324 325 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); 326 cpumask_clear(cpus); 327 328 kvm_for_each_vcpu(i, vcpu, kvm) { 329 if (vcpu == except) 330 continue; 331 kvm_make_vcpu_request(vcpu, req, cpus, me); 332 } 333 334 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); 335 put_cpu(); 336 337 return called; 338 } 339 340 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 341 { 342 return kvm_make_all_cpus_request_except(kvm, req, NULL); 343 } 344 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); 345 346 void kvm_flush_remote_tlbs(struct kvm *kvm) 347 { 348 ++kvm->stat.generic.remote_tlb_flush_requests; 349 350 /* 351 * We want to publish modifications to the page tables before reading 352 * mode. Pairs with a memory barrier in arch-specific code. 353 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest 354 * and smp_mb in walk_shadow_page_lockless_begin/end. 355 * - powerpc: smp_mb in kvmppc_prepare_to_enter. 356 * 357 * There is already an smp_mb__after_atomic() before 358 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that 359 * barrier here. 360 */ 361 if (!kvm_arch_flush_remote_tlbs(kvm) 362 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 363 ++kvm->stat.generic.remote_tlb_flush; 364 } 365 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 366 367 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) 368 { 369 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) 370 return; 371 372 /* 373 * Fall back to a flushing entire TLBs if the architecture range-based 374 * TLB invalidation is unsupported or can't be performed for whatever 375 * reason. 376 */ 377 kvm_flush_remote_tlbs(kvm); 378 } 379 380 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm, 381 const struct kvm_memory_slot *memslot) 382 { 383 /* 384 * All current use cases for flushing the TLBs for a specific memslot 385 * are related to dirty logging, and many do the TLB flush out of 386 * mmu_lock. The interaction between the various operations on memslot 387 * must be serialized by slots_locks to ensure the TLB flush from one 388 * operation is observed by any other operation on the same memslot. 
389 */ 390 lockdep_assert_held(&kvm->slots_lock); 391 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); 392 } 393 394 static void kvm_flush_shadow_all(struct kvm *kvm) 395 { 396 kvm_arch_flush_shadow_all(kvm); 397 kvm_arch_guest_memory_reclaimed(kvm); 398 } 399 400 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 401 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, 402 gfp_t gfp_flags) 403 { 404 gfp_flags |= mc->gfp_zero; 405 406 if (mc->kmem_cache) 407 return kmem_cache_alloc(mc->kmem_cache, gfp_flags); 408 else 409 return (void *)__get_free_page(gfp_flags); 410 } 411 412 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min) 413 { 414 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT; 415 void *obj; 416 417 if (mc->nobjs >= min) 418 return 0; 419 420 if (unlikely(!mc->objects)) { 421 if (WARN_ON_ONCE(!capacity)) 422 return -EIO; 423 424 mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp); 425 if (!mc->objects) 426 return -ENOMEM; 427 428 mc->capacity = capacity; 429 } 430 431 /* It is illegal to request a different capacity across topups. */ 432 if (WARN_ON_ONCE(mc->capacity != capacity)) 433 return -EIO; 434 435 while (mc->nobjs < mc->capacity) { 436 obj = mmu_memory_cache_alloc_obj(mc, gfp); 437 if (!obj) 438 return mc->nobjs >= min ? 0 : -ENOMEM; 439 mc->objects[mc->nobjs++] = obj; 440 } 441 return 0; 442 } 443 444 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) 445 { 446 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min); 447 } 448 449 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) 450 { 451 return mc->nobjs; 452 } 453 454 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) 455 { 456 while (mc->nobjs) { 457 if (mc->kmem_cache) 458 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); 459 else 460 free_page((unsigned long)mc->objects[--mc->nobjs]); 461 } 462 463 kvfree(mc->objects); 464 465 mc->objects = NULL; 466 mc->capacity = 0; 467 } 468 469 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 470 { 471 void *p; 472 473 if (WARN_ON(!mc->nobjs)) 474 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); 475 else 476 p = mc->objects[--mc->nobjs]; 477 BUG_ON(!p); 478 return p; 479 } 480 #endif 481 482 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 483 { 484 mutex_init(&vcpu->mutex); 485 vcpu->cpu = -1; 486 vcpu->kvm = kvm; 487 vcpu->vcpu_id = id; 488 vcpu->pid = NULL; 489 #ifndef __KVM_HAVE_ARCH_WQP 490 rcuwait_init(&vcpu->wait); 491 #endif 492 kvm_async_pf_vcpu_init(vcpu); 493 494 kvm_vcpu_set_in_spin_loop(vcpu, false); 495 kvm_vcpu_set_dy_eligible(vcpu, false); 496 vcpu->preempted = false; 497 vcpu->ready = false; 498 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 499 vcpu->last_used_slot = NULL; 500 501 /* Fill the stats id string for the vcpu */ 502 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 503 task_pid_nr(current), id); 504 } 505 506 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) 507 { 508 kvm_arch_vcpu_destroy(vcpu); 509 kvm_dirty_ring_free(&vcpu->dirty_ring); 510 511 /* 512 * No need for rcu_read_lock as VCPU_RUN is the only place that changes 513 * the vcpu->pid pointer, and at destruction time all file descriptors 514 * are already gone. 
515 */ 516 put_pid(rcu_dereference_protected(vcpu->pid, 1)); 517 518 free_page((unsigned long)vcpu->run); 519 kmem_cache_free(kvm_vcpu_cache, vcpu); 520 } 521 522 void kvm_destroy_vcpus(struct kvm *kvm) 523 { 524 unsigned long i; 525 struct kvm_vcpu *vcpu; 526 527 kvm_for_each_vcpu(i, vcpu, kvm) { 528 kvm_vcpu_destroy(vcpu); 529 xa_erase(&kvm->vcpu_array, i); 530 } 531 532 atomic_set(&kvm->online_vcpus, 0); 533 } 534 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); 535 536 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 537 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 538 { 539 return container_of(mn, struct kvm, mmu_notifier); 540 } 541 542 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); 543 544 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, 545 unsigned long end); 546 547 typedef void (*on_unlock_fn_t)(struct kvm *kvm); 548 549 struct kvm_hva_range { 550 unsigned long start; 551 unsigned long end; 552 union kvm_mmu_notifier_arg arg; 553 hva_handler_t handler; 554 on_lock_fn_t on_lock; 555 on_unlock_fn_t on_unlock; 556 bool flush_on_ret; 557 bool may_block; 558 }; 559 560 /* 561 * Use a dedicated stub instead of NULL to indicate that there is no callback 562 * function/handler. The compiler technically can't guarantee that a real 563 * function will have a non-zero address, and so it will generate code to 564 * check for !NULL, whereas comparing against a stub will be elided at compile 565 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9). 566 */ 567 static void kvm_null_fn(void) 568 { 569 570 } 571 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) 572 573 static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG; 574 575 /* Iterate over each memslot intersecting [start, last] (inclusive) range */ 576 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ 577 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ 578 node; \ 579 node = interval_tree_iter_next(node, start, last)) \ 580 581 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, 582 const struct kvm_hva_range *range) 583 { 584 bool ret = false, locked = false; 585 struct kvm_gfn_range gfn_range; 586 struct kvm_memory_slot *slot; 587 struct kvm_memslots *slots; 588 int i, idx; 589 590 if (WARN_ON_ONCE(range->end <= range->start)) 591 return 0; 592 593 /* A null handler is allowed if and only if on_lock() is provided. */ 594 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && 595 IS_KVM_NULL_FN(range->handler))) 596 return 0; 597 598 idx = srcu_read_lock(&kvm->srcu); 599 600 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 601 struct interval_tree_node *node; 602 603 slots = __kvm_memslots(kvm, i); 604 kvm_for_each_memslot_in_hva_range(node, slots, 605 range->start, range->end - 1) { 606 unsigned long hva_start, hva_end; 607 608 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); 609 hva_start = max(range->start, slot->userspace_addr); 610 hva_end = min(range->end, slot->userspace_addr + 611 (slot->npages << PAGE_SHIFT)); 612 613 /* 614 * To optimize for the likely case where the address 615 * range is covered by zero or one memslots, don't 616 * bother making these conditional (to avoid writes on 617 * the second or later invocation of the handler). 
618 */ 619 gfn_range.arg = range->arg; 620 gfn_range.may_block = range->may_block; 621 622 /* 623 * {gfn(page) | page intersects with [hva_start, hva_end)} = 624 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 625 */ 626 gfn_range.start = hva_to_gfn_memslot(hva_start, slot); 627 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot); 628 gfn_range.slot = slot; 629 630 if (!locked) { 631 locked = true; 632 KVM_MMU_LOCK(kvm); 633 if (!IS_KVM_NULL_FN(range->on_lock)) 634 range->on_lock(kvm, range->start, range->end); 635 if (IS_KVM_NULL_FN(range->handler)) 636 break; 637 } 638 ret |= range->handler(kvm, &gfn_range); 639 } 640 } 641 642 if (range->flush_on_ret && ret) 643 kvm_flush_remote_tlbs(kvm); 644 645 if (locked) { 646 KVM_MMU_UNLOCK(kvm); 647 if (!IS_KVM_NULL_FN(range->on_unlock)) 648 range->on_unlock(kvm); 649 } 650 651 srcu_read_unlock(&kvm->srcu, idx); 652 653 /* The notifiers are averse to booleans. :-( */ 654 return (int)ret; 655 } 656 657 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn, 658 unsigned long start, 659 unsigned long end, 660 union kvm_mmu_notifier_arg arg, 661 hva_handler_t handler) 662 { 663 struct kvm *kvm = mmu_notifier_to_kvm(mn); 664 const struct kvm_hva_range range = { 665 .start = start, 666 .end = end, 667 .arg = arg, 668 .handler = handler, 669 .on_lock = (void *)kvm_null_fn, 670 .on_unlock = (void *)kvm_null_fn, 671 .flush_on_ret = true, 672 .may_block = false, 673 }; 674 675 return __kvm_handle_hva_range(kvm, &range); 676 } 677 678 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn, 679 unsigned long start, 680 unsigned long end, 681 hva_handler_t handler) 682 { 683 struct kvm *kvm = mmu_notifier_to_kvm(mn); 684 const struct kvm_hva_range range = { 685 .start = start, 686 .end = end, 687 .handler = handler, 688 .on_lock = (void *)kvm_null_fn, 689 .on_unlock = (void *)kvm_null_fn, 690 .flush_on_ret = false, 691 .may_block = false, 692 }; 693 694 return __kvm_handle_hva_range(kvm, &range); 695 } 696 697 static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 698 { 699 /* 700 * Skipping invalid memslots is correct if and only change_pte() is 701 * surrounded by invalidate_range_{start,end}(), which is currently 702 * guaranteed by the primary MMU. If that ever changes, KVM needs to 703 * unmap the memslot instead of skipping the memslot to ensure that KVM 704 * doesn't hold references to the old PFN. 705 */ 706 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); 707 708 if (range->slot->flags & KVM_MEMSLOT_INVALID) 709 return false; 710 711 return kvm_set_spte_gfn(kvm, range); 712 } 713 714 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 715 struct mm_struct *mm, 716 unsigned long address, 717 pte_t pte) 718 { 719 struct kvm *kvm = mmu_notifier_to_kvm(mn); 720 const union kvm_mmu_notifier_arg arg = { .pte = pte }; 721 722 trace_kvm_set_spte_hva(address); 723 724 /* 725 * .change_pte() must be surrounded by .invalidate_range_{start,end}(). 726 * If mmu_invalidate_in_progress is zero, then no in-progress 727 * invalidations, including this one, found a relevant memslot at 728 * start(); rechecking memslots here is unnecessary. Note, a false 729 * positive (count elevated by a different invalidation) is sub-optimal 730 * but functionally ok. 
731 */ 732 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); 733 if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) 734 return; 735 736 kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); 737 } 738 739 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, 740 unsigned long end) 741 { 742 /* 743 * The count increase must become visible at unlock time as no 744 * spte can be established without taking the mmu_lock and 745 * count is also read inside the mmu_lock critical section. 746 */ 747 kvm->mmu_invalidate_in_progress++; 748 if (likely(kvm->mmu_invalidate_in_progress == 1)) { 749 kvm->mmu_invalidate_range_start = start; 750 kvm->mmu_invalidate_range_end = end; 751 } else { 752 /* 753 * Fully tracking multiple concurrent ranges has diminishing 754 * returns. Keep things simple and just find the minimal range 755 * which includes the current and new ranges. As there won't be 756 * enough information to subtract a range after its invalidate 757 * completes, any ranges invalidated concurrently will 758 * accumulate and persist until all outstanding invalidates 759 * complete. 760 */ 761 kvm->mmu_invalidate_range_start = 762 min(kvm->mmu_invalidate_range_start, start); 763 kvm->mmu_invalidate_range_end = 764 max(kvm->mmu_invalidate_range_end, end); 765 } 766 } 767 768 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 769 const struct mmu_notifier_range *range) 770 { 771 struct kvm *kvm = mmu_notifier_to_kvm(mn); 772 const struct kvm_hva_range hva_range = { 773 .start = range->start, 774 .end = range->end, 775 .handler = kvm_unmap_gfn_range, 776 .on_lock = kvm_mmu_invalidate_begin, 777 .on_unlock = kvm_arch_guest_memory_reclaimed, 778 .flush_on_ret = true, 779 .may_block = mmu_notifier_range_blockable(range), 780 }; 781 782 trace_kvm_unmap_hva_range(range->start, range->end); 783 784 /* 785 * Prevent memslot modification between range_start() and range_end() 786 * so that conditionally locking provides the same result in both 787 * functions. Without that guarantee, the mmu_invalidate_in_progress 788 * adjustments will be imbalanced. 789 * 790 * Pairs with the decrement in range_end(). 791 */ 792 spin_lock(&kvm->mn_invalidate_lock); 793 kvm->mn_active_invalidate_count++; 794 spin_unlock(&kvm->mn_invalidate_lock); 795 796 /* 797 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e. 798 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring 799 * each cache's lock. There are relatively few caches in existence at 800 * any given time, and the caches themselves can check for hva overlap, 801 * i.e. don't need to rely on memslot overlap checks for performance. 802 * Because this runs without holding mmu_lock, the pfn caches must use 803 * mn_active_invalidate_count (see above) instead of 804 * mmu_invalidate_in_progress. 805 */ 806 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, 807 hva_range.may_block); 808 809 __kvm_handle_hva_range(kvm, &hva_range); 810 811 return 0; 812 } 813 814 void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, 815 unsigned long end) 816 { 817 /* 818 * This sequence increase will notify the kvm page fault that 819 * the page that is going to be mapped in the spte could have 820 * been freed. 821 */ 822 kvm->mmu_invalidate_seq++; 823 smp_wmb(); 824 /* 825 * The above sequence increase must be visible before the 826 * below count decrease, which is ensured by the smp_wmb above 827 * in conjunction with the smp_rmb in mmu_invalidate_retry(). 
828 */ 829 kvm->mmu_invalidate_in_progress--; 830 } 831 832 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 833 const struct mmu_notifier_range *range) 834 { 835 struct kvm *kvm = mmu_notifier_to_kvm(mn); 836 const struct kvm_hva_range hva_range = { 837 .start = range->start, 838 .end = range->end, 839 .handler = (void *)kvm_null_fn, 840 .on_lock = kvm_mmu_invalidate_end, 841 .on_unlock = (void *)kvm_null_fn, 842 .flush_on_ret = false, 843 .may_block = mmu_notifier_range_blockable(range), 844 }; 845 bool wake; 846 847 __kvm_handle_hva_range(kvm, &hva_range); 848 849 /* Pairs with the increment in range_start(). */ 850 spin_lock(&kvm->mn_invalidate_lock); 851 wake = (--kvm->mn_active_invalidate_count == 0); 852 spin_unlock(&kvm->mn_invalidate_lock); 853 854 /* 855 * There can only be one waiter, since the wait happens under 856 * slots_lock. 857 */ 858 if (wake) 859 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); 860 861 BUG_ON(kvm->mmu_invalidate_in_progress < 0); 862 } 863 864 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 865 struct mm_struct *mm, 866 unsigned long start, 867 unsigned long end) 868 { 869 trace_kvm_age_hva(start, end); 870 871 return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG, 872 kvm_age_gfn); 873 } 874 875 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 876 struct mm_struct *mm, 877 unsigned long start, 878 unsigned long end) 879 { 880 trace_kvm_age_hva(start, end); 881 882 /* 883 * Even though we do not flush TLB, this will still adversely 884 * affect performance on pre-Haswell Intel EPT, where there is 885 * no EPT Access Bit to clear so that we have to tear down EPT 886 * tables instead. If we find this unacceptable, we can always 887 * add a parameter to kvm_age_hva so that it effectively doesn't 888 * do anything on clear_young. 889 * 890 * Also note that currently we never issue secondary TLB flushes 891 * from clear_young, leaving this job up to the regular system 892 * cadence. If we find this inaccurate, we might come up with a 893 * more sophisticated heuristic later. 
894 */ 895 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn); 896 } 897 898 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 899 struct mm_struct *mm, 900 unsigned long address) 901 { 902 trace_kvm_test_age_hva(address); 903 904 return kvm_handle_hva_range_no_flush(mn, address, address + 1, 905 kvm_test_age_gfn); 906 } 907 908 static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 909 struct mm_struct *mm) 910 { 911 struct kvm *kvm = mmu_notifier_to_kvm(mn); 912 int idx; 913 914 idx = srcu_read_lock(&kvm->srcu); 915 kvm_flush_shadow_all(kvm); 916 srcu_read_unlock(&kvm->srcu, idx); 917 } 918 919 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 920 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 921 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 922 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 923 .clear_young = kvm_mmu_notifier_clear_young, 924 .test_young = kvm_mmu_notifier_test_young, 925 .change_pte = kvm_mmu_notifier_change_pte, 926 .release = kvm_mmu_notifier_release, 927 }; 928 929 static int kvm_init_mmu_notifier(struct kvm *kvm) 930 { 931 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 932 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 933 } 934 935 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 936 937 static int kvm_init_mmu_notifier(struct kvm *kvm) 938 { 939 return 0; 940 } 941 942 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 943 944 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 945 static int kvm_pm_notifier_call(struct notifier_block *bl, 946 unsigned long state, 947 void *unused) 948 { 949 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); 950 951 return kvm_arch_pm_notifier(kvm, state); 952 } 953 954 static void kvm_init_pm_notifier(struct kvm *kvm) 955 { 956 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; 957 /* Suspend KVM before we suspend ftrace, RCU, etc. */ 958 kvm->pm_notifier.priority = INT_MAX; 959 register_pm_notifier(&kvm->pm_notifier); 960 } 961 962 static void kvm_destroy_pm_notifier(struct kvm *kvm) 963 { 964 unregister_pm_notifier(&kvm->pm_notifier); 965 } 966 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */ 967 static void kvm_init_pm_notifier(struct kvm *kvm) 968 { 969 } 970 971 static void kvm_destroy_pm_notifier(struct kvm *kvm) 972 { 973 } 974 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 975 976 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 977 { 978 if (!memslot->dirty_bitmap) 979 return; 980 981 kvfree(memslot->dirty_bitmap); 982 memslot->dirty_bitmap = NULL; 983 } 984 985 /* This does not remove the slot from struct kvm_memslots data structures */ 986 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 987 { 988 kvm_destroy_dirty_bitmap(slot); 989 990 kvm_arch_free_memslot(kvm, slot); 991 992 kfree(slot); 993 } 994 995 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) 996 { 997 struct hlist_node *idnode; 998 struct kvm_memory_slot *memslot; 999 int bkt; 1000 1001 /* 1002 * The same memslot objects live in both active and inactive sets, 1003 * arbitrarily free using index '1' so the second invocation of this 1004 * function isn't operating over a structure with dangling pointers 1005 * (even though this function isn't actually touching them). 
1006 */ 1007 if (!slots->node_idx) 1008 return; 1009 1010 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) 1011 kvm_free_memslot(kvm, memslot); 1012 } 1013 1014 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 1015 { 1016 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 1017 case KVM_STATS_TYPE_INSTANT: 1018 return 0444; 1019 case KVM_STATS_TYPE_CUMULATIVE: 1020 case KVM_STATS_TYPE_PEAK: 1021 default: 1022 return 0644; 1023 } 1024 } 1025 1026 1027 static void kvm_destroy_vm_debugfs(struct kvm *kvm) 1028 { 1029 int i; 1030 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1031 kvm_vcpu_stats_header.num_desc; 1032 1033 if (IS_ERR(kvm->debugfs_dentry)) 1034 return; 1035 1036 debugfs_remove_recursive(kvm->debugfs_dentry); 1037 1038 if (kvm->debugfs_stat_data) { 1039 for (i = 0; i < kvm_debugfs_num_entries; i++) 1040 kfree(kvm->debugfs_stat_data[i]); 1041 kfree(kvm->debugfs_stat_data); 1042 } 1043 } 1044 1045 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname) 1046 { 1047 static DEFINE_MUTEX(kvm_debugfs_lock); 1048 struct dentry *dent; 1049 char dir_name[ITOA_MAX_LEN * 2]; 1050 struct kvm_stat_data *stat_data; 1051 const struct _kvm_stats_desc *pdesc; 1052 int i, ret = -ENOMEM; 1053 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1054 kvm_vcpu_stats_header.num_desc; 1055 1056 if (!debugfs_initialized()) 1057 return 0; 1058 1059 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname); 1060 mutex_lock(&kvm_debugfs_lock); 1061 dent = debugfs_lookup(dir_name, kvm_debugfs_dir); 1062 if (dent) { 1063 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name); 1064 dput(dent); 1065 mutex_unlock(&kvm_debugfs_lock); 1066 return 0; 1067 } 1068 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir); 1069 mutex_unlock(&kvm_debugfs_lock); 1070 if (IS_ERR(dent)) 1071 return 0; 1072 1073 kvm->debugfs_dentry = dent; 1074 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, 1075 sizeof(*kvm->debugfs_stat_data), 1076 GFP_KERNEL_ACCOUNT); 1077 if (!kvm->debugfs_stat_data) 1078 goto out_err; 1079 1080 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 1081 pdesc = &kvm_vm_stats_desc[i]; 1082 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 1083 if (!stat_data) 1084 goto out_err; 1085 1086 stat_data->kvm = kvm; 1087 stat_data->desc = pdesc; 1088 stat_data->kind = KVM_STAT_VM; 1089 kvm->debugfs_stat_data[i] = stat_data; 1090 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 1091 kvm->debugfs_dentry, stat_data, 1092 &stat_fops_per_vm); 1093 } 1094 1095 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 1096 pdesc = &kvm_vcpu_stats_desc[i]; 1097 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT); 1098 if (!stat_data) 1099 goto out_err; 1100 1101 stat_data->kvm = kvm; 1102 stat_data->desc = pdesc; 1103 stat_data->kind = KVM_STAT_VCPU; 1104 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; 1105 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 1106 kvm->debugfs_dentry, stat_data, 1107 &stat_fops_per_vm); 1108 } 1109 1110 ret = kvm_arch_create_vm_debugfs(kvm); 1111 if (ret) 1112 goto out_err; 1113 1114 return 0; 1115 out_err: 1116 kvm_destroy_vm_debugfs(kvm); 1117 return ret; 1118 } 1119 1120 /* 1121 * Called after the VM is otherwise initialized, but just before adding it to 1122 * the vm_list. 
1123 */ 1124 int __weak kvm_arch_post_init_vm(struct kvm *kvm) 1125 { 1126 return 0; 1127 } 1128 1129 /* 1130 * Called just after removing the VM from the vm_list, but before doing any 1131 * other destruction. 1132 */ 1133 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) 1134 { 1135 } 1136 1137 /* 1138 * Called after per-vm debugfs created. When called kvm->debugfs_dentry should 1139 * be setup already, so we can create arch-specific debugfs entries under it. 1140 * Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so 1141 * a per-arch destroy interface is not needed. 1142 */ 1143 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) 1144 { 1145 return 0; 1146 } 1147 1148 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) 1149 { 1150 struct kvm *kvm = kvm_arch_alloc_vm(); 1151 struct kvm_memslots *slots; 1152 int r = -ENOMEM; 1153 int i, j; 1154 1155 if (!kvm) 1156 return ERR_PTR(-ENOMEM); 1157 1158 KVM_MMU_LOCK_INIT(kvm); 1159 mmgrab(current->mm); 1160 kvm->mm = current->mm; 1161 kvm_eventfd_init(kvm); 1162 mutex_init(&kvm->lock); 1163 mutex_init(&kvm->irq_lock); 1164 mutex_init(&kvm->slots_lock); 1165 mutex_init(&kvm->slots_arch_lock); 1166 spin_lock_init(&kvm->mn_invalidate_lock); 1167 rcuwait_init(&kvm->mn_memslots_update_rcuwait); 1168 xa_init(&kvm->vcpu_array); 1169 1170 INIT_LIST_HEAD(&kvm->gpc_list); 1171 spin_lock_init(&kvm->gpc_lock); 1172 1173 INIT_LIST_HEAD(&kvm->devices); 1174 kvm->max_vcpus = KVM_MAX_VCPUS; 1175 1176 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 1177 1178 /* 1179 * Force subsequent debugfs file creations to fail if the VM directory 1180 * is not created (by kvm_create_vm_debugfs()). 1181 */ 1182 kvm->debugfs_dentry = ERR_PTR(-ENOENT); 1183 1184 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d", 1185 task_pid_nr(current)); 1186 1187 if (init_srcu_struct(&kvm->srcu)) 1188 goto out_err_no_srcu; 1189 if (init_srcu_struct(&kvm->irq_srcu)) 1190 goto out_err_no_irq_srcu; 1191 1192 refcount_set(&kvm->users_count, 1); 1193 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 1194 for (j = 0; j < 2; j++) { 1195 slots = &kvm->__memslots[i][j]; 1196 1197 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL); 1198 slots->hva_tree = RB_ROOT_CACHED; 1199 slots->gfn_tree = RB_ROOT; 1200 hash_init(slots->id_hash); 1201 slots->node_idx = j; 1202 1203 /* Generations must be different for each address space. 
			 */
			slots->generation = i;
		}

		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0)
		goto out_no_coalesced_mmio;

	r = kvm_create_vm_debugfs(kvm, fdname);
	if (r)
		goto out_err_no_debugfs;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err:
	kvm_destroy_vm_debugfs(kvm);
out_err_no_debugfs:
	kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in kvm_swap_active_memslots() as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the vm is not in the middle of destruction; this is a safe
 * version of kvm_get_kvm().  Return true if kvm referenced successfully,
 * false otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
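/*
 * Illustrative usage sketch (not part of the upstream file): callers that
 * only hold a weak reference take a temporary one like this:
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return -ENOENT;	// VM is already being destroyed
 *	...
 *	kvm_put_kvm(kvm);
 */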
1420 */ 1421 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, 1422 struct kvm_memory_slot *b) 1423 { 1424 if (WARN_ON_ONCE(!a && !b)) 1425 return 0; 1426 1427 if (!a) 1428 return b->as_id; 1429 if (!b) 1430 return a->as_id; 1431 1432 WARN_ON_ONCE(a->as_id != b->as_id); 1433 return a->as_id; 1434 } 1435 1436 static void kvm_insert_gfn_node(struct kvm_memslots *slots, 1437 struct kvm_memory_slot *slot) 1438 { 1439 struct rb_root *gfn_tree = &slots->gfn_tree; 1440 struct rb_node **node, *parent; 1441 int idx = slots->node_idx; 1442 1443 parent = NULL; 1444 for (node = &gfn_tree->rb_node; *node; ) { 1445 struct kvm_memory_slot *tmp; 1446 1447 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); 1448 parent = *node; 1449 if (slot->base_gfn < tmp->base_gfn) 1450 node = &(*node)->rb_left; 1451 else if (slot->base_gfn > tmp->base_gfn) 1452 node = &(*node)->rb_right; 1453 else 1454 BUG(); 1455 } 1456 1457 rb_link_node(&slot->gfn_node[idx], parent, node); 1458 rb_insert_color(&slot->gfn_node[idx], gfn_tree); 1459 } 1460 1461 static void kvm_erase_gfn_node(struct kvm_memslots *slots, 1462 struct kvm_memory_slot *slot) 1463 { 1464 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); 1465 } 1466 1467 static void kvm_replace_gfn_node(struct kvm_memslots *slots, 1468 struct kvm_memory_slot *old, 1469 struct kvm_memory_slot *new) 1470 { 1471 int idx = slots->node_idx; 1472 1473 WARN_ON_ONCE(old->base_gfn != new->base_gfn); 1474 1475 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], 1476 &slots->gfn_tree); 1477 } 1478 1479 /* 1480 * Replace @old with @new in the inactive memslots. 1481 * 1482 * With NULL @old this simply adds @new. 1483 * With NULL @new this simply removes @old. 1484 * 1485 * If @new is non-NULL its hva_node[slots_idx] range has to be set 1486 * appropriately. 1487 */ 1488 static void kvm_replace_memslot(struct kvm *kvm, 1489 struct kvm_memory_slot *old, 1490 struct kvm_memory_slot *new) 1491 { 1492 int as_id = kvm_memslots_get_as_id(old, new); 1493 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1494 int idx = slots->node_idx; 1495 1496 if (old) { 1497 hash_del(&old->id_node[idx]); 1498 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); 1499 1500 if ((long)old == atomic_long_read(&slots->last_used_slot)) 1501 atomic_long_set(&slots->last_used_slot, (long)new); 1502 1503 if (!new) { 1504 kvm_erase_gfn_node(slots, old); 1505 return; 1506 } 1507 } 1508 1509 /* 1510 * Initialize @new's hva range. Do this even when replacing an @old 1511 * slot, kvm_copy_memslot() deliberately does not touch node data. 1512 */ 1513 new->hva_node[idx].start = new->userspace_addr; 1514 new->hva_node[idx].last = new->userspace_addr + 1515 (new->npages << PAGE_SHIFT) - 1; 1516 1517 /* 1518 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), 1519 * hva_node needs to be swapped with remove+insert even though hva can't 1520 * change when replacing an existing slot. 1521 */ 1522 hash_add(slots->id_hash, &new->id_node[idx], new->id); 1523 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); 1524 1525 /* 1526 * If the memslot gfn is unchanged, rb_replace_node() can be used to 1527 * switch the node in the gfn tree instead of removing the old and 1528 * inserting the new as two separate operations. Replacement is a 1529 * single O(1) operation versus two O(log(n)) operations for 1530 * remove+insert. 
1531 */ 1532 if (old && old->base_gfn == new->base_gfn) { 1533 kvm_replace_gfn_node(slots, old, new); 1534 } else { 1535 if (old) 1536 kvm_erase_gfn_node(slots, old); 1537 kvm_insert_gfn_node(slots, new); 1538 } 1539 } 1540 1541 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 1542 { 1543 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 1544 1545 #ifdef __KVM_HAVE_READONLY_MEM 1546 valid_flags |= KVM_MEM_READONLY; 1547 #endif 1548 1549 if (mem->flags & ~valid_flags) 1550 return -EINVAL; 1551 1552 return 0; 1553 } 1554 1555 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) 1556 { 1557 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); 1558 1559 /* Grab the generation from the activate memslots. */ 1560 u64 gen = __kvm_memslots(kvm, as_id)->generation; 1561 1562 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); 1563 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1564 1565 /* 1566 * Do not store the new memslots while there are invalidations in 1567 * progress, otherwise the locking in invalidate_range_start and 1568 * invalidate_range_end will be unbalanced. 1569 */ 1570 spin_lock(&kvm->mn_invalidate_lock); 1571 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); 1572 while (kvm->mn_active_invalidate_count) { 1573 set_current_state(TASK_UNINTERRUPTIBLE); 1574 spin_unlock(&kvm->mn_invalidate_lock); 1575 schedule(); 1576 spin_lock(&kvm->mn_invalidate_lock); 1577 } 1578 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); 1579 rcu_assign_pointer(kvm->memslots[as_id], slots); 1580 spin_unlock(&kvm->mn_invalidate_lock); 1581 1582 /* 1583 * Acquired in kvm_set_memslot. Must be released before synchronize 1584 * SRCU below in order to avoid deadlock with another thread 1585 * acquiring the slots_arch_lock in an srcu critical section. 1586 */ 1587 mutex_unlock(&kvm->slots_arch_lock); 1588 1589 synchronize_srcu_expedited(&kvm->srcu); 1590 1591 /* 1592 * Increment the new memslot generation a second time, dropping the 1593 * update in-progress flag and incrementing the generation based on 1594 * the number of address spaces. This provides a unique and easily 1595 * identifiable generation number while the memslots are in flux. 1596 */ 1597 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; 1598 1599 /* 1600 * Generations must be unique even across address spaces. We do not need 1601 * a global counter for that, instead the generation space is evenly split 1602 * across address spaces. For example, with two address spaces, address 1603 * space 0 will use generations 0, 2, 4, ... while address space 1 will 1604 * use generations 1, 3, 5, ... 1605 */ 1606 gen += KVM_ADDRESS_SPACE_NUM; 1607 1608 kvm_arch_memslots_updated(kvm, gen); 1609 1610 slots->generation = gen; 1611 } 1612 1613 static int kvm_prepare_memory_region(struct kvm *kvm, 1614 const struct kvm_memory_slot *old, 1615 struct kvm_memory_slot *new, 1616 enum kvm_mr_change change) 1617 { 1618 int r; 1619 1620 /* 1621 * If dirty logging is disabled, nullify the bitmap; the old bitmap 1622 * will be freed on "commit". If logging is enabled in both old and 1623 * new, reuse the existing bitmap. If logging is enabled only in the 1624 * new and KVM isn't using a ring buffer, allocate and initialize a 1625 * new bitmap. 
1626 */ 1627 if (change != KVM_MR_DELETE) { 1628 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) 1629 new->dirty_bitmap = NULL; 1630 else if (old && old->dirty_bitmap) 1631 new->dirty_bitmap = old->dirty_bitmap; 1632 else if (kvm_use_dirty_bitmap(kvm)) { 1633 r = kvm_alloc_dirty_bitmap(new); 1634 if (r) 1635 return r; 1636 1637 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 1638 bitmap_set(new->dirty_bitmap, 0, new->npages); 1639 } 1640 } 1641 1642 r = kvm_arch_prepare_memory_region(kvm, old, new, change); 1643 1644 /* Free the bitmap on failure if it was allocated above. */ 1645 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap)) 1646 kvm_destroy_dirty_bitmap(new); 1647 1648 return r; 1649 } 1650 1651 static void kvm_commit_memory_region(struct kvm *kvm, 1652 struct kvm_memory_slot *old, 1653 const struct kvm_memory_slot *new, 1654 enum kvm_mr_change change) 1655 { 1656 int old_flags = old ? old->flags : 0; 1657 int new_flags = new ? new->flags : 0; 1658 /* 1659 * Update the total number of memslot pages before calling the arch 1660 * hook so that architectures can consume the result directly. 1661 */ 1662 if (change == KVM_MR_DELETE) 1663 kvm->nr_memslot_pages -= old->npages; 1664 else if (change == KVM_MR_CREATE) 1665 kvm->nr_memslot_pages += new->npages; 1666 1667 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) { 1668 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1; 1669 atomic_set(&kvm->nr_memslots_dirty_logging, 1670 atomic_read(&kvm->nr_memslots_dirty_logging) + change); 1671 } 1672 1673 kvm_arch_commit_memory_region(kvm, old, new, change); 1674 1675 switch (change) { 1676 case KVM_MR_CREATE: 1677 /* Nothing more to do. */ 1678 break; 1679 case KVM_MR_DELETE: 1680 /* Free the old memslot and all its metadata. */ 1681 kvm_free_memslot(kvm, old); 1682 break; 1683 case KVM_MR_MOVE: 1684 case KVM_MR_FLAGS_ONLY: 1685 /* 1686 * Free the dirty bitmap as needed; the below check encompasses 1687 * both the flags and whether a ring buffer is being used) 1688 */ 1689 if (old->dirty_bitmap && !new->dirty_bitmap) 1690 kvm_destroy_dirty_bitmap(old); 1691 1692 /* 1693 * The final quirk. Free the detached, old slot, but only its 1694 * memory, not any metadata. Metadata, including arch specific 1695 * data, may be reused by @new. 1696 */ 1697 kfree(old); 1698 break; 1699 default: 1700 BUG(); 1701 } 1702 } 1703 1704 /* 1705 * Activate @new, which must be installed in the inactive slots by the caller, 1706 * by swapping the active slots and then propagating @new to @old once @old is 1707 * unreachable and can be safely modified. 1708 * 1709 * With NULL @old this simply adds @new to @active (while swapping the sets). 1710 * With NULL @new this simply removes @old from @active and frees it 1711 * (while also swapping the sets). 1712 */ 1713 static void kvm_activate_memslot(struct kvm *kvm, 1714 struct kvm_memory_slot *old, 1715 struct kvm_memory_slot *new) 1716 { 1717 int as_id = kvm_memslots_get_as_id(old, new); 1718 1719 kvm_swap_active_memslots(kvm, as_id); 1720 1721 /* Propagate the new memslot to the now inactive memslots. 
	kvm_replace_memslot(kvm, old, new);
}

static void kvm_copy_memslot(struct kvm_memory_slot *dest,
			     const struct kvm_memory_slot *src)
{
	dest->base_gfn = src->base_gfn;
	dest->npages = src->npages;
	dest->dirty_bitmap = src->dirty_bitmap;
	dest->arch = src->arch;
	dest->userspace_addr = src->userspace_addr;
	dest->flags = src->flags;
	dest->id = src->id;
	dest->as_id = src->as_id;
}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Mark the current slot INVALID.  As with all memslot modifications,
	 * this must be done on an unreachable slot to avoid modifying the
	 * current slot in the active tree.
	 */
	kvm_copy_memslot(invalid_slot, old);
	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
	kvm_replace_memslot(kvm, old, invalid_slot);

	/*
	 * Activate the slot that is now marked INVALID, but don't propagate
	 * the slot to the now inactive slots.  The slot is either going to be
	 * deleted or recreated as a new slot.
	 */
	kvm_swap_active_memslots(kvm, old->as_id);

	/*
	 * From this point no new shadow pages pointing to a deleted, or moved,
	 * memslot will be created.  Validation of sp->gfn happens in:
	 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 * - kvm_is_visible_gfn (mmu_check_root)
	 */
	kvm_arch_flush_shadow_memslot(kvm, old);
	kvm_arch_guest_memory_reclaimed(kvm);

	/* Was released by kvm_swap_active_memslots(), reacquire. */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Copy the arch-specific field of the newly-installed slot back to the
	 * old slot as the arch data could have changed between releasing
	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the
	 * lock above.  Writers are required to retrieve memslots *after*
	 * acquiring slots_arch_lock, thus the active slot's data is guaranteed
	 * to be fresh.
	 */
	old->arch = invalid_slot->arch;
}

static void kvm_create_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *new)
{
	/* Add the new memslot to the inactive set and activate. */
	kvm_replace_memslot(kvm, NULL, new);
	kvm_activate_memslot(kvm, NULL, new);
}

static void kvm_delete_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *old,
			       struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Remove the old memslot (in the inactive memslots) by passing NULL as
	 * the "new" slot, and for the invalid version in the active slots.
	 */
	kvm_replace_memslot(kvm, old, NULL);
	kvm_activate_memslot(kvm, invalid_slot, NULL);
}

static void kvm_move_memslot(struct kvm *kvm,
			     struct kvm_memory_slot *old,
			     struct kvm_memory_slot *new,
			     struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Replace the old memslot in the inactive slots, and then swap slots
	 * and replace the current INVALID with the new as well.
	 */
	kvm_replace_memslot(kvm, old, new);
	kvm_activate_memslot(kvm, invalid_slot, new);
}

static void kvm_update_flags_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new)
{
	/*
	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
	 * an intermediate step.  Instead, the old memslot is simply replaced
	 * with a new, updated copy in both memslot sets.
1821 */ 1822 kvm_replace_memslot(kvm, old, new); 1823 kvm_activate_memslot(kvm, old, new); 1824 } 1825 1826 static int kvm_set_memslot(struct kvm *kvm, 1827 struct kvm_memory_slot *old, 1828 struct kvm_memory_slot *new, 1829 enum kvm_mr_change change) 1830 { 1831 struct kvm_memory_slot *invalid_slot; 1832 int r; 1833 1834 /* 1835 * Released in kvm_swap_active_memslots(). 1836 * 1837 * Must be held from before the current memslots are copied until after 1838 * the new memslots are installed with rcu_assign_pointer, then 1839 * released before the synchronize srcu in kvm_swap_active_memslots(). 1840 * 1841 * When modifying memslots outside of the slots_lock, must be held 1842 * before reading the pointer to the current memslots until after all 1843 * changes to those memslots are complete. 1844 * 1845 * These rules ensure that installing new memslots does not lose 1846 * changes made to the previous memslots. 1847 */ 1848 mutex_lock(&kvm->slots_arch_lock); 1849 1850 /* 1851 * Invalidate the old slot if it's being deleted or moved. This is 1852 * done prior to actually deleting/moving the memslot to allow vCPUs to 1853 * continue running by ensuring there are no mappings or shadow pages 1854 * for the memslot when it is deleted/moved. Without pre-invalidation 1855 * (and without a lock), a window would exist between effecting the 1856 * delete/move and committing the changes in arch code where KVM or a 1857 * guest could access a non-existent memslot. 1858 * 1859 * Modifications are done on a temporary, unreachable slot. The old 1860 * slot needs to be preserved in case a later step fails and the 1861 * invalidation needs to be reverted. 1862 */ 1863 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1864 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); 1865 if (!invalid_slot) { 1866 mutex_unlock(&kvm->slots_arch_lock); 1867 return -ENOMEM; 1868 } 1869 kvm_invalidate_memslot(kvm, old, invalid_slot); 1870 } 1871 1872 r = kvm_prepare_memory_region(kvm, old, new, change); 1873 if (r) { 1874 /* 1875 * For DELETE/MOVE, revert the above INVALID change. No 1876 * modifications required since the original slot was preserved 1877 * in the inactive slots. Changing the active memslots also 1878 * release slots_arch_lock. 1879 */ 1880 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1881 kvm_activate_memslot(kvm, invalid_slot, old); 1882 kfree(invalid_slot); 1883 } else { 1884 mutex_unlock(&kvm->slots_arch_lock); 1885 } 1886 return r; 1887 } 1888 1889 /* 1890 * For DELETE and MOVE, the working slot is now active as the INVALID 1891 * version of the old slot. MOVE is particularly special as it reuses 1892 * the old slot and returns a copy of the old slot (in working_slot). 1893 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the 1894 * old slot is detached but otherwise preserved. 1895 */ 1896 if (change == KVM_MR_CREATE) 1897 kvm_create_memslot(kvm, new); 1898 else if (change == KVM_MR_DELETE) 1899 kvm_delete_memslot(kvm, old, invalid_slot); 1900 else if (change == KVM_MR_MOVE) 1901 kvm_move_memslot(kvm, old, new, invalid_slot); 1902 else if (change == KVM_MR_FLAGS_ONLY) 1903 kvm_update_flags_memslot(kvm, old, new); 1904 else 1905 BUG(); 1906 1907 /* Free the temporary INVALID slot used for DELETE and MOVE. */ 1908 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1909 kfree(invalid_slot); 1910 1911 /* 1912 * No need to refresh new->arch, changes after dropping slots_arch_lock 1913 * will directly hit the final, active memslot. 
Architectures are 1914 * responsible for knowing that new->arch may be stale. 1915 */ 1916 kvm_commit_memory_region(kvm, old, new, change); 1917 1918 return 0; 1919 } 1920 1921 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, 1922 gfn_t start, gfn_t end) 1923 { 1924 struct kvm_memslot_iter iter; 1925 1926 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { 1927 if (iter.slot->id != id) 1928 return true; 1929 } 1930 1931 return false; 1932 } 1933 1934 /* 1935 * Allocate some memory and give it an address in the guest physical address 1936 * space. 1937 * 1938 * Discontiguous memory is allowed, mostly for framebuffers. 1939 * 1940 * Must be called holding kvm->slots_lock for write. 1941 */ 1942 int __kvm_set_memory_region(struct kvm *kvm, 1943 const struct kvm_userspace_memory_region *mem) 1944 { 1945 struct kvm_memory_slot *old, *new; 1946 struct kvm_memslots *slots; 1947 enum kvm_mr_change change; 1948 unsigned long npages; 1949 gfn_t base_gfn; 1950 int as_id, id; 1951 int r; 1952 1953 r = check_memory_region_flags(mem); 1954 if (r) 1955 return r; 1956 1957 as_id = mem->slot >> 16; 1958 id = (u16)mem->slot; 1959 1960 /* General sanity checks */ 1961 if ((mem->memory_size & (PAGE_SIZE - 1)) || 1962 (mem->memory_size != (unsigned long)mem->memory_size)) 1963 return -EINVAL; 1964 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 1965 return -EINVAL; 1966 /* We can read the guest memory with __xxx_user() later on. */ 1967 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 1968 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 1969 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 1970 mem->memory_size)) 1971 return -EINVAL; 1972 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 1973 return -EINVAL; 1974 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 1975 return -EINVAL; 1976 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 1977 return -EINVAL; 1978 1979 slots = __kvm_memslots(kvm, as_id); 1980 1981 /* 1982 * Note, the old memslot (and the pointer itself!) may be invalidated 1983 * and/or destroyed by kvm_set_memslot(). 1984 */ 1985 old = id_to_memslot(slots, id); 1986 1987 if (!mem->memory_size) { 1988 if (!old || !old->npages) 1989 return -EINVAL; 1990 1991 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 1992 return -EIO; 1993 1994 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 1995 } 1996 1997 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 1998 npages = (mem->memory_size >> PAGE_SHIFT); 1999 2000 if (!old || !old->npages) { 2001 change = KVM_MR_CREATE; 2002 2003 /* 2004 * To simplify KVM internals, the total number of pages across 2005 * all memslots must fit in an unsigned long. 2006 */ 2007 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 2008 return -EINVAL; 2009 } else { /* Modify an existing slot. */ 2010 if ((mem->userspace_addr != old->userspace_addr) || 2011 (npages != old->npages) || 2012 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 2013 return -EINVAL; 2014 2015 if (base_gfn != old->base_gfn) 2016 change = KVM_MR_MOVE; 2017 else if (mem->flags != old->flags) 2018 change = KVM_MR_FLAGS_ONLY; 2019 else /* Nothing to change. */ 2020 return 0; 2021 } 2022 2023 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 2024 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 2025 return -EEXIST; 2026 2027 /* Allocate a slot that will persist in the memslot. 
*/ 2028 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 2029 if (!new) 2030 return -ENOMEM; 2031 2032 new->as_id = as_id; 2033 new->id = id; 2034 new->base_gfn = base_gfn; 2035 new->npages = npages; 2036 new->flags = mem->flags; 2037 new->userspace_addr = mem->userspace_addr; 2038 2039 r = kvm_set_memslot(kvm, old, new, change); 2040 if (r) 2041 kfree(new); 2042 return r; 2043 } 2044 EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 2045 2046 int kvm_set_memory_region(struct kvm *kvm, 2047 const struct kvm_userspace_memory_region *mem) 2048 { 2049 int r; 2050 2051 mutex_lock(&kvm->slots_lock); 2052 r = __kvm_set_memory_region(kvm, mem); 2053 mutex_unlock(&kvm->slots_lock); 2054 return r; 2055 } 2056 EXPORT_SYMBOL_GPL(kvm_set_memory_region); 2057 2058 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 2059 struct kvm_userspace_memory_region *mem) 2060 { 2061 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 2062 return -EINVAL; 2063 2064 return kvm_set_memory_region(kvm, mem); 2065 } 2066 2067 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 2068 /** 2069 * kvm_get_dirty_log - get a snapshot of dirty pages 2070 * @kvm: pointer to kvm instance 2071 * @log: slot id and address to which we copy the log 2072 * @is_dirty: set to '1' if any dirty pages were found 2073 * @memslot: set to the associated memslot, always valid on success 2074 */ 2075 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 2076 int *is_dirty, struct kvm_memory_slot **memslot) 2077 { 2078 struct kvm_memslots *slots; 2079 int i, as_id, id; 2080 unsigned long n; 2081 unsigned long any = 0; 2082 2083 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2084 if (!kvm_use_dirty_bitmap(kvm)) 2085 return -ENXIO; 2086 2087 *memslot = NULL; 2088 *is_dirty = 0; 2089 2090 as_id = log->slot >> 16; 2091 id = (u16)log->slot; 2092 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2093 return -EINVAL; 2094 2095 slots = __kvm_memslots(kvm, as_id); 2096 *memslot = id_to_memslot(slots, id); 2097 if (!(*memslot) || !(*memslot)->dirty_bitmap) 2098 return -ENOENT; 2099 2100 kvm_arch_sync_dirty_log(kvm, *memslot); 2101 2102 n = kvm_dirty_bitmap_bytes(*memslot); 2103 2104 for (i = 0; !any && i < n/sizeof(long); ++i) 2105 any = (*memslot)->dirty_bitmap[i]; 2106 2107 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n)) 2108 return -EFAULT; 2109 2110 if (any) 2111 *is_dirty = 1; 2112 return 0; 2113 } 2114 EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 2115 2116 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2117 /** 2118 * kvm_get_dirty_log_protect - get a snapshot of dirty pages 2119 * and reenable dirty page tracking for the corresponding pages. 2120 * @kvm: pointer to kvm instance 2121 * @log: slot id and address to which we copy the log 2122 * 2123 * We need to keep it in mind that VCPU threads can write to the bitmap 2124 * concurrently. So, to avoid losing track of dirty pages we keep the 2125 * following order: 2126 * 2127 * 1. Take a snapshot of the bit and clear it if needed. 2128 * 2. Write protect the corresponding page. 2129 * 3. Copy the snapshot to the userspace. 2130 * 4. Upon return caller flushes TLB's if needed. 2131 * 2132 * Between 2 and 4, the guest may write to the page using the remaining TLB 2133 * entry. This is not a problem because the page is reported dirty using 2134 * the snapshot taken before and step 4 ensures that writes done after 2135 * exiting to userspace will be logged for the next call. 
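 *
 * Illustrative userspace sequence (sketch only; vm_fd, slot_id and bitmap
 * are hypothetical, and the bitmap must be sized for the memslot):
 *
 *	struct kvm_dirty_log log = { .slot = slot_id };
 *
 *	log.dirty_bitmap = bitmap;
 *	if (!ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log))
 *		handle_dirty_pages(bitmap);	(hypothetical helper)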
2136  *
2137  */
2138 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2139 {
2140 	struct kvm_memslots *slots;
2141 	struct kvm_memory_slot *memslot;
2142 	int i, as_id, id;
2143 	unsigned long n;
2144 	unsigned long *dirty_bitmap;
2145 	unsigned long *dirty_bitmap_buffer;
2146 	bool flush;
2147 
2148 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2149 	if (!kvm_use_dirty_bitmap(kvm))
2150 		return -ENXIO;
2151 
2152 	as_id = log->slot >> 16;
2153 	id = (u16)log->slot;
2154 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2155 		return -EINVAL;
2156 
2157 	slots = __kvm_memslots(kvm, as_id);
2158 	memslot = id_to_memslot(slots, id);
2159 	if (!memslot || !memslot->dirty_bitmap)
2160 		return -ENOENT;
2161 
2162 	dirty_bitmap = memslot->dirty_bitmap;
2163 
2164 	kvm_arch_sync_dirty_log(kvm, memslot);
2165 
2166 	n = kvm_dirty_bitmap_bytes(memslot);
2167 	flush = false;
2168 	if (kvm->manual_dirty_log_protect) {
2169 		/*
2170 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2171 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2172 		 * is some code duplication between this function and
2173 		 * kvm_get_dirty_log, but hopefully all architectures will
2174 		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2175 		 * can be eliminated.
2176 		 */
2177 		dirty_bitmap_buffer = dirty_bitmap;
2178 	} else {
2179 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2180 		memset(dirty_bitmap_buffer, 0, n);
2181 
2182 		KVM_MMU_LOCK(kvm);
2183 		for (i = 0; i < n / sizeof(long); i++) {
2184 			unsigned long mask;
2185 			gfn_t offset;
2186 
2187 			if (!dirty_bitmap[i])
2188 				continue;
2189 
2190 			flush = true;
2191 			mask = xchg(&dirty_bitmap[i], 0);
2192 			dirty_bitmap_buffer[i] = mask;
2193 
2194 			offset = i * BITS_PER_LONG;
2195 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2196 								offset, mask);
2197 		}
2198 		KVM_MMU_UNLOCK(kvm);
2199 	}
2200 
2201 	if (flush)
2202 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2203 
2204 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2205 		return -EFAULT;
2206 	return 0;
2207 }
2208 
2209 
2210 /**
2211  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2212  * @kvm:	kvm instance
2213  * @log:	slot id and address to which we copy the log
2214  *
2215  * Steps 1-4 below provide a general overview of dirty page logging.  See
2216  * kvm_get_dirty_log_protect() function description for additional details.
2217  *
2218  * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
2219  * always flush the TLB (step 4) even if the previous step failed and the dirty
2220  * bitmap may be corrupt.  Regardless of the previous outcome, the KVM logging
2221  * API does not preclude userspace from subsequently reading the dirty log.
2222  * Flushing the TLB ensures writes will be marked dirty for the next log read.
2223  *
2224  * 1. Take a snapshot of the bit and clear it if needed.
2225  * 2. Write protect the corresponding page.
2226  * 3. Copy the snapshot to the userspace.
2227  * 4. Flush TLBs if needed.
2228  */
2229 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2230 				      struct kvm_dirty_log *log)
2231 {
2232 	int r;
2233 
2234 	mutex_lock(&kvm->slots_lock);
2235 
2236 	r = kvm_get_dirty_log_protect(kvm, log);
2237 
2238 	mutex_unlock(&kvm->slots_lock);
2239 	return r;
2240 }
2241 
2242 /**
2243  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2244  * and reenable dirty page tracking for the corresponding pages.
2245 * @kvm: pointer to kvm instance 2246 * @log: slot id and address from which to fetch the bitmap of dirty pages 2247 */ 2248 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2249 struct kvm_clear_dirty_log *log) 2250 { 2251 struct kvm_memslots *slots; 2252 struct kvm_memory_slot *memslot; 2253 int as_id, id; 2254 gfn_t offset; 2255 unsigned long i, n; 2256 unsigned long *dirty_bitmap; 2257 unsigned long *dirty_bitmap_buffer; 2258 bool flush; 2259 2260 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2261 if (!kvm_use_dirty_bitmap(kvm)) 2262 return -ENXIO; 2263 2264 as_id = log->slot >> 16; 2265 id = (u16)log->slot; 2266 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 2267 return -EINVAL; 2268 2269 if (log->first_page & 63) 2270 return -EINVAL; 2271 2272 slots = __kvm_memslots(kvm, as_id); 2273 memslot = id_to_memslot(slots, id); 2274 if (!memslot || !memslot->dirty_bitmap) 2275 return -ENOENT; 2276 2277 dirty_bitmap = memslot->dirty_bitmap; 2278 2279 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2280 2281 if (log->first_page > memslot->npages || 2282 log->num_pages > memslot->npages - log->first_page || 2283 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2284 return -EINVAL; 2285 2286 kvm_arch_sync_dirty_log(kvm, memslot); 2287 2288 flush = false; 2289 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2290 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2291 return -EFAULT; 2292 2293 KVM_MMU_LOCK(kvm); 2294 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2295 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2296 i++, offset += BITS_PER_LONG) { 2297 unsigned long mask = *dirty_bitmap_buffer++; 2298 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2299 if (!mask) 2300 continue; 2301 2302 mask &= atomic_long_fetch_andnot(mask, p); 2303 2304 /* 2305 * mask contains the bits that really have been cleared. This 2306 * never includes any bits beyond the length of the memslot (if 2307 * the length is not aligned to 64 pages), therefore it is not 2308 * a problem if userspace sets them in log->dirty_bitmap. 2309 */ 2310 if (mask) { 2311 flush = true; 2312 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2313 offset, mask); 2314 } 2315 } 2316 KVM_MMU_UNLOCK(kvm); 2317 2318 if (flush) 2319 kvm_flush_remote_tlbs_memslot(kvm, memslot); 2320 2321 return 0; 2322 } 2323 2324 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2325 struct kvm_clear_dirty_log *log) 2326 { 2327 int r; 2328 2329 mutex_lock(&kvm->slots_lock); 2330 2331 r = kvm_clear_dirty_log_protect(kvm, log); 2332 2333 mutex_unlock(&kvm->slots_lock); 2334 return r; 2335 } 2336 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2337 2338 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2339 { 2340 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2341 } 2342 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2343 2344 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2345 { 2346 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2347 u64 gen = slots->generation; 2348 struct kvm_memory_slot *slot; 2349 2350 /* 2351 * This also protects against using a memslot from a different address space, 2352 * since different address spaces have different generation numbers. 
2353 */ 2354 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2355 vcpu->last_used_slot = NULL; 2356 vcpu->last_used_slot_gen = gen; 2357 } 2358 2359 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2360 if (slot) 2361 return slot; 2362 2363 /* 2364 * Fall back to searching all memslots. We purposely use 2365 * search_memslots() instead of __gfn_to_memslot() to avoid 2366 * thrashing the VM-wide last_used_slot in kvm_memslots. 2367 */ 2368 slot = search_memslots(slots, gfn, false); 2369 if (slot) { 2370 vcpu->last_used_slot = slot; 2371 return slot; 2372 } 2373 2374 return NULL; 2375 } 2376 2377 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2378 { 2379 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2380 2381 return kvm_is_visible_memslot(memslot); 2382 } 2383 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2384 2385 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2386 { 2387 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2388 2389 return kvm_is_visible_memslot(memslot); 2390 } 2391 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2392 2393 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2394 { 2395 struct vm_area_struct *vma; 2396 unsigned long addr, size; 2397 2398 size = PAGE_SIZE; 2399 2400 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2401 if (kvm_is_error_hva(addr)) 2402 return PAGE_SIZE; 2403 2404 mmap_read_lock(current->mm); 2405 vma = find_vma(current->mm, addr); 2406 if (!vma) 2407 goto out; 2408 2409 size = vma_kernel_pagesize(vma); 2410 2411 out: 2412 mmap_read_unlock(current->mm); 2413 2414 return size; 2415 } 2416 2417 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2418 { 2419 return slot->flags & KVM_MEM_READONLY; 2420 } 2421 2422 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2423 gfn_t *nr_pages, bool write) 2424 { 2425 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2426 return KVM_HVA_ERR_BAD; 2427 2428 if (memslot_is_readonly(slot) && write) 2429 return KVM_HVA_ERR_RO_BAD; 2430 2431 if (nr_pages) 2432 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2433 2434 return __gfn_to_hva_memslot(slot, gfn); 2435 } 2436 2437 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2438 gfn_t *nr_pages) 2439 { 2440 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2441 } 2442 2443 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2444 gfn_t gfn) 2445 { 2446 return gfn_to_hva_many(slot, gfn, NULL); 2447 } 2448 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2449 2450 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2451 { 2452 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2453 } 2454 EXPORT_SYMBOL_GPL(gfn_to_hva); 2455 2456 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2457 { 2458 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2459 } 2460 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2461 2462 /* 2463 * Return the hva of a @gfn and the R/W attribute if possible. 
2464  *
2465  * @slot: the kvm_memory_slot which contains @gfn
2466  * @gfn: the gfn to be translated
2467  * @writable: used to return the read/write attribute of the @slot if the hva
2468  * is valid and @writable is not NULL
2469  */
2470 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2471 				      gfn_t gfn, bool *writable)
2472 {
2473 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2474 
2475 	if (!kvm_is_error_hva(hva) && writable)
2476 		*writable = !memslot_is_readonly(slot);
2477 
2478 	return hva;
2479 }
2480 
2481 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2482 {
2483 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2484 
2485 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2486 }
2487 
2488 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2489 {
2490 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2491 
2492 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2493 }
2494 
2495 static inline int check_user_page_hwpoison(unsigned long addr)
2496 {
2497 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2498 
2499 	rc = get_user_pages(addr, 1, flags, NULL);
2500 	return rc == -EHWPOISON;
2501 }
2502 
2503 /*
2504  * The fast path to get the writable pfn which will be stored in @pfn,
2505  * true indicates success, otherwise false is returned.  It's also the
2506  * only part that can run in atomic context.
2507  */
2508 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2509 			    bool *writable, kvm_pfn_t *pfn)
2510 {
2511 	struct page *page[1];
2512 
2513 	/*
2514 	 * Fast pin a writable pfn only if it is a write fault request
2515 	 * or the caller allows a writable pfn to be mapped for a read
2516 	 * fault request.
2517 	 */
2518 	if (!(write_fault || writable))
2519 		return false;
2520 
2521 	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2522 		*pfn = page_to_pfn(page[0]);
2523 
2524 		if (writable)
2525 			*writable = true;
2526 		return true;
2527 	}
2528 
2529 	return false;
2530 }
2531 
2532 /*
2533  * The slow path to get the pfn of the specified host virtual address;
2534  * 1 indicates success, -errno is returned if an error is detected.
2535  */
2536 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2537 			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
2538 {
2539 	/*
2540 	 * When a VCPU accesses a page that is not mapped into the secondary
2541 	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2542 	 * make progress.  We always want to honor NUMA hinting faults in that
2543 	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2544 	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2545 	 * mapped into the secondary MMU and gets accessed by a VCPU.
2546 	 *
2547 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2548 	 * implicitly honor NUMA hinting faults and don't need this flag.
2549 */ 2550 unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT; 2551 struct page *page; 2552 int npages; 2553 2554 might_sleep(); 2555 2556 if (writable) 2557 *writable = write_fault; 2558 2559 if (write_fault) 2560 flags |= FOLL_WRITE; 2561 if (async) 2562 flags |= FOLL_NOWAIT; 2563 if (interruptible) 2564 flags |= FOLL_INTERRUPTIBLE; 2565 2566 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2567 if (npages != 1) 2568 return npages; 2569 2570 /* map read fault as writable if possible */ 2571 if (unlikely(!write_fault) && writable) { 2572 struct page *wpage; 2573 2574 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2575 *writable = true; 2576 put_page(page); 2577 page = wpage; 2578 } 2579 } 2580 *pfn = page_to_pfn(page); 2581 return npages; 2582 } 2583 2584 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2585 { 2586 if (unlikely(!(vma->vm_flags & VM_READ))) 2587 return false; 2588 2589 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2590 return false; 2591 2592 return true; 2593 } 2594 2595 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2596 { 2597 struct page *page = kvm_pfn_to_refcounted_page(pfn); 2598 2599 if (!page) 2600 return 1; 2601 2602 return get_page_unless_zero(page); 2603 } 2604 2605 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2606 unsigned long addr, bool write_fault, 2607 bool *writable, kvm_pfn_t *p_pfn) 2608 { 2609 kvm_pfn_t pfn; 2610 pte_t *ptep; 2611 pte_t pte; 2612 spinlock_t *ptl; 2613 int r; 2614 2615 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2616 if (r) { 2617 /* 2618 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2619 * not call the fault handler, so do it here. 2620 */ 2621 bool unlocked = false; 2622 r = fixup_user_fault(current->mm, addr, 2623 (write_fault ? FAULT_FLAG_WRITE : 0), 2624 &unlocked); 2625 if (unlocked) 2626 return -EAGAIN; 2627 if (r) 2628 return r; 2629 2630 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2631 if (r) 2632 return r; 2633 } 2634 2635 pte = ptep_get(ptep); 2636 2637 if (write_fault && !pte_write(pte)) { 2638 pfn = KVM_PFN_ERR_RO_FAULT; 2639 goto out; 2640 } 2641 2642 if (writable) 2643 *writable = pte_write(pte); 2644 pfn = pte_pfn(pte); 2645 2646 /* 2647 * Get a reference here because callers of *hva_to_pfn* and 2648 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2649 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2650 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will 2651 * simply do nothing for reserved pfns. 2652 * 2653 * Whoever called remap_pfn_range is also going to call e.g. 2654 * unmap_mapping_range before the underlying pages are freed, 2655 * causing a call to our MMU notifier. 2656 * 2657 * Certain IO or PFNMAP mappings can be backed with valid 2658 * struct pages, but be allocated without refcounting e.g., 2659 * tail pages of non-compound higher order allocations, which 2660 * would then underflow the refcount when the caller does the 2661 * required put_page. Don't allow those pages here. 2662 */ 2663 if (!kvm_try_get_pfn(pfn)) 2664 r = -EFAULT; 2665 2666 out: 2667 pte_unmap_unlock(ptep, ptl); 2668 *p_pfn = pfn; 2669 2670 return r; 2671 } 2672 2673 /* 2674 * Pin guest page in memory and return its pfn. 
2675  * @addr: host virtual address which maps memory to the guest
2676  * @atomic: whether the caller is in an atomic context and must not sleep
2677  * @interruptible: whether the process can be interrupted by non-fatal signals
2678  * @async: if non-NULL, don't wait for IO when the host page is not present;
2679  *	   *async is set if the page could be faulted in by waiting
2680  * @write_fault: whether we should get a writable host page
2681  * @writable: whether a writable host page may be mapped for !@write_fault
2682  *
2683  * The function will map a writable host page for these two cases:
2684  * 1): @write_fault = true
2685  * 2): @write_fault = false && @writable; @writable will tell the caller
2686  *     whether the mapping is writable.
2687  */
2688 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2689 		     bool *async, bool write_fault, bool *writable)
2690 {
2691 	struct vm_area_struct *vma;
2692 	kvm_pfn_t pfn;
2693 	int npages, r;
2694 
2695 	/* we can do it either atomically or asynchronously, not both */
2696 	BUG_ON(atomic && async);
2697 
2698 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2699 		return pfn;
2700 
2701 	if (atomic)
2702 		return KVM_PFN_ERR_FAULT;
2703 
2704 	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2705 				 writable, &pfn);
2706 	if (npages == 1)
2707 		return pfn;
2708 	if (npages == -EINTR)
2709 		return KVM_PFN_ERR_SIGPENDING;
2710 
2711 	mmap_read_lock(current->mm);
2712 	if (npages == -EHWPOISON ||
2713 	    (!async && check_user_page_hwpoison(addr))) {
2714 		pfn = KVM_PFN_ERR_HWPOISON;
2715 		goto exit;
2716 	}
2717 
2718 retry:
2719 	vma = vma_lookup(current->mm, addr);
2720 
2721 	if (vma == NULL)
2722 		pfn = KVM_PFN_ERR_FAULT;
2723 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2724 		r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2725 		if (r == -EAGAIN)
2726 			goto retry;
2727 		if (r < 0)
2728 			pfn = KVM_PFN_ERR_FAULT;
2729 	} else {
2730 		if (async && vma_is_valid(vma, write_fault))
2731 			*async = true;
2732 		pfn = KVM_PFN_ERR_FAULT;
2733 	}
2734 exit:
2735 	mmap_read_unlock(current->mm);
2736 	return pfn;
2737 }
2738 
2739 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2740 			       bool atomic, bool interruptible, bool *async,
2741 			       bool write_fault, bool *writable, hva_t *hva)
2742 {
2743 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2744 
2745 	if (hva)
2746 		*hva = addr;
2747 
2748 	if (addr == KVM_HVA_ERR_RO_BAD) {
2749 		if (writable)
2750 			*writable = false;
2751 		return KVM_PFN_ERR_RO_FAULT;
2752 	}
2753 
2754 	if (kvm_is_error_hva(addr)) {
2755 		if (writable)
2756 			*writable = false;
2757 		return KVM_PFN_NOSLOT;
2758 	}
2759 
2760 	/* Do not map writable pfn in the readonly memslot.
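	 *
	 * e.g. a read fault on a read-only memslot can still be served, but
	 * *writable is forced to false and the writable pointer is cleared
	 * so that hva_to_pfn() won't opportunistically map the page writable.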
	 */
2761 	if (writable && memslot_is_readonly(slot)) {
2762 		*writable = false;
2763 		writable = NULL;
2764 	}
2765 
2766 	return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
2767 			  writable);
2768 }
2769 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2770 
2771 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2772 			  bool *writable)
2773 {
2774 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
2775 				    NULL, write_fault, writable, NULL);
2776 }
2777 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2778 
2779 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
2780 {
2781 	return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
2782 				    NULL, NULL);
2783 }
2784 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2785 
2786 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
2787 {
2788 	return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
2789 				    NULL, NULL);
2790 }
2791 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2792 
2793 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2794 {
2795 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2796 }
2797 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2798 
2799 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2800 {
2801 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2802 }
2803 EXPORT_SYMBOL_GPL(gfn_to_pfn);
2804 
2805 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2806 {
2807 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2808 }
2809 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2810 
2811 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2812 			    struct page **pages, int nr_pages)
2813 {
2814 	unsigned long addr;
2815 	gfn_t entry = 0;
2816 
2817 	addr = gfn_to_hva_many(slot, gfn, &entry);
2818 	if (kvm_is_error_hva(addr))
2819 		return -1;
2820 
2821 	if (entry < nr_pages)
2822 		return 0;
2823 
2824 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2825 }
2826 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2827 
2828 /*
2829  * Do not use this helper unless you are absolutely certain the gfn _must_ be
2830  * backed by 'struct page'.  A valid example is if the backing memslot is
2831  * controlled by KVM.  Note, if the returned page is valid, its refcount has
2832  * been elevated by gfn_to_pfn().
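 *
 * Typical usage (editor's sketch, not upstream text):
 *
 *	page = gfn_to_page(kvm, gfn);
 *	if (!is_error_page(page)) {
 *		... read or write the page contents ...
 *		kvm_release_page_dirty(page);	(or _clean if not written)
 *	}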
2833 */ 2834 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2835 { 2836 struct page *page; 2837 kvm_pfn_t pfn; 2838 2839 pfn = gfn_to_pfn(kvm, gfn); 2840 2841 if (is_error_noslot_pfn(pfn)) 2842 return KVM_ERR_PTR_BAD_PAGE; 2843 2844 page = kvm_pfn_to_refcounted_page(pfn); 2845 if (!page) 2846 return KVM_ERR_PTR_BAD_PAGE; 2847 2848 return page; 2849 } 2850 EXPORT_SYMBOL_GPL(gfn_to_page); 2851 2852 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) 2853 { 2854 if (dirty) 2855 kvm_release_pfn_dirty(pfn); 2856 else 2857 kvm_release_pfn_clean(pfn); 2858 } 2859 2860 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2861 { 2862 kvm_pfn_t pfn; 2863 void *hva = NULL; 2864 struct page *page = KVM_UNMAPPED_PAGE; 2865 2866 if (!map) 2867 return -EINVAL; 2868 2869 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2870 if (is_error_noslot_pfn(pfn)) 2871 return -EINVAL; 2872 2873 if (pfn_valid(pfn)) { 2874 page = pfn_to_page(pfn); 2875 hva = kmap(page); 2876 #ifdef CONFIG_HAS_IOMEM 2877 } else { 2878 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2879 #endif 2880 } 2881 2882 if (!hva) 2883 return -EFAULT; 2884 2885 map->page = page; 2886 map->hva = hva; 2887 map->pfn = pfn; 2888 map->gfn = gfn; 2889 2890 return 0; 2891 } 2892 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2893 2894 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2895 { 2896 if (!map) 2897 return; 2898 2899 if (!map->hva) 2900 return; 2901 2902 if (map->page != KVM_UNMAPPED_PAGE) 2903 kunmap(map->page); 2904 #ifdef CONFIG_HAS_IOMEM 2905 else 2906 memunmap(map->hva); 2907 #endif 2908 2909 if (dirty) 2910 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 2911 2912 kvm_release_pfn(map->pfn, dirty); 2913 2914 map->hva = NULL; 2915 map->page = NULL; 2916 } 2917 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2918 2919 static bool kvm_is_ad_tracked_page(struct page *page) 2920 { 2921 /* 2922 * Per page-flags.h, pages tagged PG_reserved "should in general not be 2923 * touched (e.g. set dirty) except by its owner". 2924 */ 2925 return !PageReserved(page); 2926 } 2927 2928 static void kvm_set_page_dirty(struct page *page) 2929 { 2930 if (kvm_is_ad_tracked_page(page)) 2931 SetPageDirty(page); 2932 } 2933 2934 static void kvm_set_page_accessed(struct page *page) 2935 { 2936 if (kvm_is_ad_tracked_page(page)) 2937 mark_page_accessed(page); 2938 } 2939 2940 void kvm_release_page_clean(struct page *page) 2941 { 2942 WARN_ON(is_error_page(page)); 2943 2944 kvm_set_page_accessed(page); 2945 put_page(page); 2946 } 2947 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2948 2949 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2950 { 2951 struct page *page; 2952 2953 if (is_error_noslot_pfn(pfn)) 2954 return; 2955 2956 page = kvm_pfn_to_refcounted_page(pfn); 2957 if (!page) 2958 return; 2959 2960 kvm_release_page_clean(page); 2961 } 2962 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2963 2964 void kvm_release_page_dirty(struct page *page) 2965 { 2966 WARN_ON(is_error_page(page)); 2967 2968 kvm_set_page_dirty(page); 2969 kvm_release_page_clean(page); 2970 } 2971 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2972 2973 void kvm_release_pfn_dirty(kvm_pfn_t pfn) 2974 { 2975 struct page *page; 2976 2977 if (is_error_noslot_pfn(pfn)) 2978 return; 2979 2980 page = kvm_pfn_to_refcounted_page(pfn); 2981 if (!page) 2982 return; 2983 2984 kvm_release_page_dirty(page); 2985 } 2986 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2987 2988 /* 2989 * Note, checking for an error/noslot pfn is the caller's responsibility when 2990 * directly marking a page dirty/accessed. 
Unlike the "release" helpers, the 2991 * "set" helpers are not to be used when the pfn might point at garbage. 2992 */ 2993 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2994 { 2995 if (WARN_ON(is_error_noslot_pfn(pfn))) 2996 return; 2997 2998 if (pfn_valid(pfn)) 2999 kvm_set_page_dirty(pfn_to_page(pfn)); 3000 } 3001 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 3002 3003 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 3004 { 3005 if (WARN_ON(is_error_noslot_pfn(pfn))) 3006 return; 3007 3008 if (pfn_valid(pfn)) 3009 kvm_set_page_accessed(pfn_to_page(pfn)); 3010 } 3011 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 3012 3013 static int next_segment(unsigned long len, int offset) 3014 { 3015 if (len > PAGE_SIZE - offset) 3016 return PAGE_SIZE - offset; 3017 else 3018 return len; 3019 } 3020 3021 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 3022 void *data, int offset, int len) 3023 { 3024 int r; 3025 unsigned long addr; 3026 3027 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3028 if (kvm_is_error_hva(addr)) 3029 return -EFAULT; 3030 r = __copy_from_user(data, (void __user *)addr + offset, len); 3031 if (r) 3032 return -EFAULT; 3033 return 0; 3034 } 3035 3036 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 3037 int len) 3038 { 3039 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3040 3041 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3042 } 3043 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 3044 3045 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 3046 int offset, int len) 3047 { 3048 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3049 3050 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3051 } 3052 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 3053 3054 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 3055 { 3056 gfn_t gfn = gpa >> PAGE_SHIFT; 3057 int seg; 3058 int offset = offset_in_page(gpa); 3059 int ret; 3060 3061 while ((seg = next_segment(len, offset)) != 0) { 3062 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 3063 if (ret < 0) 3064 return ret; 3065 offset = 0; 3066 len -= seg; 3067 data += seg; 3068 ++gfn; 3069 } 3070 return 0; 3071 } 3072 EXPORT_SYMBOL_GPL(kvm_read_guest); 3073 3074 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 3075 { 3076 gfn_t gfn = gpa >> PAGE_SHIFT; 3077 int seg; 3078 int offset = offset_in_page(gpa); 3079 int ret; 3080 3081 while ((seg = next_segment(len, offset)) != 0) { 3082 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 3083 if (ret < 0) 3084 return ret; 3085 offset = 0; 3086 len -= seg; 3087 data += seg; 3088 ++gfn; 3089 } 3090 return 0; 3091 } 3092 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 3093 3094 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 3095 void *data, int offset, unsigned long len) 3096 { 3097 int r; 3098 unsigned long addr; 3099 3100 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3101 if (kvm_is_error_hva(addr)) 3102 return -EFAULT; 3103 pagefault_disable(); 3104 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 3105 pagefault_enable(); 3106 if (r) 3107 return -EFAULT; 3108 return 0; 3109 } 3110 3111 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 3112 void *data, unsigned long len) 3113 { 3114 gfn_t gfn = gpa >> PAGE_SHIFT; 3115 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3116 int offset = offset_in_page(gpa); 3117 3118 return 
__kvm_read_guest_atomic(slot, gfn, data, offset, len); 3119 } 3120 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 3121 3122 static int __kvm_write_guest_page(struct kvm *kvm, 3123 struct kvm_memory_slot *memslot, gfn_t gfn, 3124 const void *data, int offset, int len) 3125 { 3126 int r; 3127 unsigned long addr; 3128 3129 addr = gfn_to_hva_memslot(memslot, gfn); 3130 if (kvm_is_error_hva(addr)) 3131 return -EFAULT; 3132 r = __copy_to_user((void __user *)addr + offset, data, len); 3133 if (r) 3134 return -EFAULT; 3135 mark_page_dirty_in_slot(kvm, memslot, gfn); 3136 return 0; 3137 } 3138 3139 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 3140 const void *data, int offset, int len) 3141 { 3142 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3143 3144 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 3145 } 3146 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 3147 3148 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 3149 const void *data, int offset, int len) 3150 { 3151 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3152 3153 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 3154 } 3155 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 3156 3157 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 3158 unsigned long len) 3159 { 3160 gfn_t gfn = gpa >> PAGE_SHIFT; 3161 int seg; 3162 int offset = offset_in_page(gpa); 3163 int ret; 3164 3165 while ((seg = next_segment(len, offset)) != 0) { 3166 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 3167 if (ret < 0) 3168 return ret; 3169 offset = 0; 3170 len -= seg; 3171 data += seg; 3172 ++gfn; 3173 } 3174 return 0; 3175 } 3176 EXPORT_SYMBOL_GPL(kvm_write_guest); 3177 3178 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 3179 unsigned long len) 3180 { 3181 gfn_t gfn = gpa >> PAGE_SHIFT; 3182 int seg; 3183 int offset = offset_in_page(gpa); 3184 int ret; 3185 3186 while ((seg = next_segment(len, offset)) != 0) { 3187 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 3188 if (ret < 0) 3189 return ret; 3190 offset = 0; 3191 len -= seg; 3192 data += seg; 3193 ++gfn; 3194 } 3195 return 0; 3196 } 3197 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3198 3199 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3200 struct gfn_to_hva_cache *ghc, 3201 gpa_t gpa, unsigned long len) 3202 { 3203 int offset = offset_in_page(gpa); 3204 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3205 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3206 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3207 gfn_t nr_pages_avail; 3208 3209 /* Update ghc->generation before performing any error checks. */ 3210 ghc->generation = slots->generation; 3211 3212 if (start_gfn > end_gfn) { 3213 ghc->hva = KVM_HVA_ERR_BAD; 3214 return -EINVAL; 3215 } 3216 3217 /* 3218 * If the requested region crosses two memslots, we still 3219 * verify that the entire region is valid here. 3220 */ 3221 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3222 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3223 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3224 &nr_pages_avail); 3225 if (kvm_is_error_hva(ghc->hva)) 3226 return -EFAULT; 3227 } 3228 3229 /* Use the slow path for cross page reads and writes. 
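	 *
	 * Illustrative caller pattern for the cache API (sketch only; ghc and
	 * val are hypothetical):
	 *
	 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val));
	 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));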
	 */
3230 	if (nr_pages_needed == 1)
3231 		ghc->hva += offset;
3232 	else
3233 		ghc->memslot = NULL;
3234 
3235 	ghc->gpa = gpa;
3236 	ghc->len = len;
3237 	return 0;
3238 }
3239 
3240 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3241 			      gpa_t gpa, unsigned long len)
3242 {
3243 	struct kvm_memslots *slots = kvm_memslots(kvm);
3244 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3245 }
3246 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3247 
3248 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3249 				  void *data, unsigned int offset,
3250 				  unsigned long len)
3251 {
3252 	struct kvm_memslots *slots = kvm_memslots(kvm);
3253 	int r;
3254 	gpa_t gpa = ghc->gpa + offset;
3255 
3256 	if (WARN_ON_ONCE(len + offset > ghc->len))
3257 		return -EINVAL;
3258 
3259 	if (slots->generation != ghc->generation) {
3260 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3261 			return -EFAULT;
3262 	}
3263 
3264 	if (kvm_is_error_hva(ghc->hva))
3265 		return -EFAULT;
3266 
3267 	if (unlikely(!ghc->memslot))
3268 		return kvm_write_guest(kvm, gpa, data, len);
3269 
3270 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3271 	if (r)
3272 		return -EFAULT;
3273 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3274 
3275 	return 0;
3276 }
3277 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3278 
3279 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3280 			   void *data, unsigned long len)
3281 {
3282 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3283 }
3284 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3285 
3286 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3287 				 void *data, unsigned int offset,
3288 				 unsigned long len)
3289 {
3290 	struct kvm_memslots *slots = kvm_memslots(kvm);
3291 	int r;
3292 	gpa_t gpa = ghc->gpa + offset;
3293 
3294 	if (WARN_ON_ONCE(len + offset > ghc->len))
3295 		return -EINVAL;
3296 
3297 	if (slots->generation != ghc->generation) {
3298 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3299 			return -EFAULT;
3300 	}
3301 
3302 	if (kvm_is_error_hva(ghc->hva))
3303 		return -EFAULT;
3304 
3305 	if (unlikely(!ghc->memslot))
3306 		return kvm_read_guest(kvm, gpa, data, len);
3307 
3308 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3309 	if (r)
3310 		return -EFAULT;
3311 
3312 	return 0;
3313 }
3314 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3315 
3316 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3317 			  void *data, unsigned long len)
3318 {
3319 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3320 }
3321 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3322 
3323 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3324 {
3325 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3326 	gfn_t gfn = gpa >> PAGE_SHIFT;
3327 	int seg;
3328 	int offset = offset_in_page(gpa);
3329 	int ret;
3330 
3331 	while ((seg = next_segment(len, offset)) != 0) {
3332 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3333 		if (ret < 0)
3334 			return ret;
3335 		offset = 0;
3336 		len -= seg;
3337 		++gfn;
3338 	}
3339 	return 0;
3340 }
3341 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3342 
3343 void mark_page_dirty_in_slot(struct kvm *kvm,
3344 			     const struct kvm_memory_slot *memslot,
3345 			     gfn_t gfn)
3346 {
3347 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3348 
3349 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3350 	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3351 		return;
3352 
3353 	WARN_ON_ONCE(!vcpu &&
		     !kvm_arch_allow_write_without_running_vcpu(kvm));
3354 #endif
3355 
3356 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3357 		unsigned long rel_gfn = gfn - memslot->base_gfn;
3358 		u32 slot = (memslot->as_id << 16) | memslot->id;
3359 
3360 		if (kvm->dirty_ring_size && vcpu)
3361 			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3362 		else if (memslot->dirty_bitmap)
3363 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
3364 	}
3365 }
3366 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3367 
3368 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3369 {
3370 	struct kvm_memory_slot *memslot;
3371 
3372 	memslot = gfn_to_memslot(kvm, gfn);
3373 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3374 }
3375 EXPORT_SYMBOL_GPL(mark_page_dirty);
3376 
3377 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3378 {
3379 	struct kvm_memory_slot *memslot;
3380 
3381 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3382 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3383 }
3384 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3385 
3386 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3387 {
3388 	if (!vcpu->sigset_active)
3389 		return;
3390 
3391 	/*
3392 	 * This does a lockless modification of ->real_blocked, which is fine
3393 	 * because only current can change ->real_blocked and all readers of
3394 	 * ->real_blocked don't care as long as ->real_blocked is always a
3395 	 * subset of ->blocked.
3396 	 */
3397 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3398 }
3399 
3400 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3401 {
3402 	if (!vcpu->sigset_active)
3403 		return;
3404 
3405 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3406 	sigemptyset(&current->real_blocked);
3407 }
3408 
3409 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3410 {
3411 	unsigned int old, val, grow, grow_start;
3412 
3413 	old = val = vcpu->halt_poll_ns;
3414 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3415 	grow = READ_ONCE(halt_poll_ns_grow);
3416 	if (!grow)
3417 		goto out;
3418 
3419 	val *= grow;
3420 	if (val < grow_start)
3421 		val = grow_start;
3422 
3423 	vcpu->halt_poll_ns = val;
3424 out:
3425 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3426 }
3427 
3428 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3429 {
3430 	unsigned int old, val, shrink, grow_start;
3431 
3432 	old = val = vcpu->halt_poll_ns;
3433 	shrink = READ_ONCE(halt_poll_ns_shrink);
3434 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3435 	if (shrink == 0)
3436 		val = 0;
3437 	else
3438 		val /= shrink;
3439 
3440 	if (val < grow_start)
3441 		val = 0;
3442 
3443 	vcpu->halt_poll_ns = val;
3444 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3445 }
3446 
3447 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3448 {
3449 	int ret = -EINTR;
3450 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3451 
3452 	if (kvm_arch_vcpu_runnable(vcpu))
3453 		goto out;
3454 	if (kvm_cpu_has_pending_timer(vcpu))
3455 		goto out;
3456 	if (signal_pending(current))
3457 		goto out;
3458 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3459 		goto out;
3460 
3461 	ret = 0;
3462 out:
3463 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3464 	return ret;
3465 }
3466 
3467 /*
3468  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3469  * pending.  This is mostly used when halting a vCPU, but may also be used
3470  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3471 */ 3472 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 3473 { 3474 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 3475 bool waited = false; 3476 3477 vcpu->stat.generic.blocking = 1; 3478 3479 preempt_disable(); 3480 kvm_arch_vcpu_blocking(vcpu); 3481 prepare_to_rcuwait(wait); 3482 preempt_enable(); 3483 3484 for (;;) { 3485 set_current_state(TASK_INTERRUPTIBLE); 3486 3487 if (kvm_vcpu_check_block(vcpu) < 0) 3488 break; 3489 3490 waited = true; 3491 schedule(); 3492 } 3493 3494 preempt_disable(); 3495 finish_rcuwait(wait); 3496 kvm_arch_vcpu_unblocking(vcpu); 3497 preempt_enable(); 3498 3499 vcpu->stat.generic.blocking = 0; 3500 3501 return waited; 3502 } 3503 3504 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, 3505 ktime_t end, bool success) 3506 { 3507 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; 3508 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); 3509 3510 ++vcpu->stat.generic.halt_attempted_poll; 3511 3512 if (success) { 3513 ++vcpu->stat.generic.halt_successful_poll; 3514 3515 if (!vcpu_valid_wakeup(vcpu)) 3516 ++vcpu->stat.generic.halt_poll_invalid; 3517 3518 stats->halt_poll_success_ns += poll_ns; 3519 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); 3520 } else { 3521 stats->halt_poll_fail_ns += poll_ns; 3522 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); 3523 } 3524 } 3525 3526 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu) 3527 { 3528 struct kvm *kvm = vcpu->kvm; 3529 3530 if (kvm->override_halt_poll_ns) { 3531 /* 3532 * Ensure kvm->max_halt_poll_ns is not read before 3533 * kvm->override_halt_poll_ns. 3534 * 3535 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL. 3536 */ 3537 smp_rmb(); 3538 return READ_ONCE(kvm->max_halt_poll_ns); 3539 } 3540 3541 return READ_ONCE(halt_poll_ns); 3542 } 3543 3544 /* 3545 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt 3546 * polling is enabled, busy wait for a short time before blocking to avoid the 3547 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3548 * is halted. 3549 */ 3550 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3551 { 3552 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3553 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3554 ktime_t start, cur, poll_end; 3555 bool waited = false; 3556 bool do_halt_poll; 3557 u64 halt_ns; 3558 3559 if (vcpu->halt_poll_ns > max_halt_poll_ns) 3560 vcpu->halt_poll_ns = max_halt_poll_ns; 3561 3562 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3563 3564 start = cur = poll_end = ktime_get(); 3565 if (do_halt_poll) { 3566 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3567 3568 do { 3569 if (kvm_vcpu_check_block(vcpu) < 0) 3570 goto out; 3571 cpu_relax(); 3572 poll_end = cur = ktime_get(); 3573 } while (kvm_vcpu_can_poll(cur, stop)); 3574 } 3575 3576 waited = kvm_vcpu_block(vcpu); 3577 3578 cur = ktime_get(); 3579 if (waited) { 3580 vcpu->stat.generic.halt_wait_ns += 3581 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3582 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3583 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3584 } 3585 out: 3586 /* The total time the vCPU was "halted", including polling time. */ 3587 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3588 3589 /* 3590 * Note, halt-polling is considered successful so long as the vCPU was 3591 * never actually scheduled out, i.e. 
even if the wake event arrived
3592 	 * after the end of the halt-polling loop itself, but before the full wait.
3593 	 */
3594 	if (do_halt_poll)
3595 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
3596 
3597 	if (halt_poll_allowed) {
3598 		/* Recompute the max halt poll time in case it changed. */
3599 		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3600 
3601 		if (!vcpu_valid_wakeup(vcpu)) {
3602 			shrink_halt_poll_ns(vcpu);
3603 		} else if (max_halt_poll_ns) {
3604 			if (halt_ns <= vcpu->halt_poll_ns)
3605 				;
3606 			/* we had a long block, shrink polling */
3607 			else if (vcpu->halt_poll_ns &&
3608 				 halt_ns > max_halt_poll_ns)
3609 				shrink_halt_poll_ns(vcpu);
3610 			/* we had a short halt and our poll time is too small */
3611 			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3612 				 halt_ns < max_halt_poll_ns)
3613 				grow_halt_poll_ns(vcpu);
3614 		} else {
3615 			vcpu->halt_poll_ns = 0;
3616 		}
3617 	}
3618 
3619 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3620 }
3621 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3622 
3623 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3624 {
3625 	if (__kvm_vcpu_wake_up(vcpu)) {
3626 		WRITE_ONCE(vcpu->ready, true);
3627 		++vcpu->stat.generic.halt_wakeup;
3628 		return true;
3629 	}
3630 
3631 	return false;
3632 }
3633 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3634 
3635 #ifndef CONFIG_S390
3636 /*
3637  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3638  */
3639 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3640 {
3641 	int me, cpu;
3642 
3643 	if (kvm_vcpu_wake_up(vcpu))
3644 		return;
3645 
3646 	me = get_cpu();
3647 	/*
3648 	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3649 	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3650 	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3651 	 * within the vCPU thread itself.
3652 	 */
3653 	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3654 		if (vcpu->mode == IN_GUEST_MODE)
3655 			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3656 		goto out;
3657 	}
3658 
3659 	/*
3660 	 * Note, the vCPU could get migrated to a different pCPU at any point
3661 	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3662 	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3663 	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3664 	 * vCPU also requires it to leave IN_GUEST_MODE.
3665 	 */
3666 	if (kvm_arch_vcpu_should_kick(vcpu)) {
3667 		cpu = READ_ONCE(vcpu->cpu);
3668 		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3669 			smp_send_reschedule(cpu);
3670 	}
3671 out:
3672 	put_cpu();
3673 }
3674 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3675 #endif /* !CONFIG_S390 */
3676 
3677 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3678 {
3679 	struct pid *pid;
3680 	struct task_struct *task = NULL;
3681 	int ret = 0;
3682 
3683 	rcu_read_lock();
3684 	pid = rcu_dereference(target->pid);
3685 	if (pid)
3686 		task = get_pid_task(pid, PIDTYPE_PID);
3687 	rcu_read_unlock();
3688 	if (!task)
3689 		return ret;
3690 	ret = yield_to(task, 1);
3691 	put_task_struct(task);
3692 
3693 	return ret;
3694 }
3695 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3696 
3697 /*
3698  * Helper that checks whether a VCPU is eligible for directed yield.
3699  * The most eligible candidate to yield to is decided by the following heuristics:
3700  *
3701  * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3702  * (preempted lock holder), indicated by @in_spin_loop.
3703  * Set at the beginning and cleared at the end of interception/PLE handler.
3704  *
3705  * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
3706  * a chance last time (it has mostly become eligible now since we probably
3707  * yielded to the lock holder in the last iteration.  This is done by toggling
3708  * @dy_eligible each time a VCPU is checked for eligibility.)
3709  *
3710  * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3711  * to a preempted lock-holder could result in wrong VCPU selection and CPU
3712  * burning.  Giving priority to a potential lock-holder increases lock
3713  * progress.
3714  *
3715  * Since the algorithm is based on heuristics, accessing another VCPU's data
3716  * without locking does no harm.  It may result in trying to yield to the
3717  * same VCPU, failing, and continuing with the next VCPU, and so on.
3718  */
3719 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3720 {
3721 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3722 	bool eligible;
3723 
3724 	eligible = !vcpu->spin_loop.in_spin_loop ||
3725 		    vcpu->spin_loop.dy_eligible;
3726 
3727 	if (vcpu->spin_loop.in_spin_loop)
3728 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3729 
3730 	return eligible;
3731 #else
3732 	return true;
3733 #endif
3734 }
3735 
3736 /*
3737  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3738  * a vcpu_load/vcpu_put pair.  However, for most architectures
3739  * kvm_arch_vcpu_runnable does not require vcpu_load.
3740  */
3741 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3742 {
3743 	return kvm_arch_vcpu_runnable(vcpu);
3744 }
3745 
3746 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3747 {
3748 	if (kvm_arch_dy_runnable(vcpu))
3749 		return true;
3750 
3751 #ifdef CONFIG_KVM_ASYNC_PF
3752 	if (!list_empty_careful(&vcpu->async_pf.done))
3753 		return true;
3754 #endif
3755 
3756 	return false;
3757 }
3758 
3759 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3760 {
3761 	return false;
3762 }
3763 
3764 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3765 {
3766 	struct kvm *kvm = me->kvm;
3767 	struct kvm_vcpu *vcpu;
3768 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3769 	unsigned long i;
3770 	int yielded = 0;
3771 	int try = 3;
3772 	int pass;
3773 
3774 	kvm_vcpu_set_in_spin_loop(me, true);
3775 	/*
3776 	 * We boost the priority of a VCPU that is runnable but not
3777 	 * currently running, because it got preempted by something
3778 	 * else and called schedule in __vcpu_run.  Hopefully that
3779 	 * VCPU is holding the lock that we need and will release it.
3780 	 * We approximate round-robin by starting at the last boosted VCPU.
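	 *
	 * Worked example (illustrative): with 4 vCPUs and last_boosted_vcpu
	 * == 2, pass 0 below considers only vCPU 3 and pass 1 considers
	 * vCPUs 0..2, approximating a rotation that starts just after the
	 * last boosted vCPU.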
3781 */ 3782 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3783 kvm_for_each_vcpu(i, vcpu, kvm) { 3784 if (!pass && i <= last_boosted_vcpu) { 3785 i = last_boosted_vcpu; 3786 continue; 3787 } else if (pass && i > last_boosted_vcpu) 3788 break; 3789 if (!READ_ONCE(vcpu->ready)) 3790 continue; 3791 if (vcpu == me) 3792 continue; 3793 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3794 continue; 3795 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3796 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3797 !kvm_arch_vcpu_in_kernel(vcpu)) 3798 continue; 3799 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3800 continue; 3801 3802 yielded = kvm_vcpu_yield_to(vcpu); 3803 if (yielded > 0) { 3804 kvm->last_boosted_vcpu = i; 3805 break; 3806 } else if (yielded < 0) { 3807 try--; 3808 if (!try) 3809 break; 3810 } 3811 } 3812 } 3813 kvm_vcpu_set_in_spin_loop(me, false); 3814 3815 /* Ensure vcpu is not eligible during next spinloop */ 3816 kvm_vcpu_set_dy_eligible(me, false); 3817 } 3818 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3819 3820 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3821 { 3822 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3823 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3824 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3825 kvm->dirty_ring_size / PAGE_SIZE); 3826 #else 3827 return false; 3828 #endif 3829 } 3830 3831 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3832 { 3833 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3834 struct page *page; 3835 3836 if (vmf->pgoff == 0) 3837 page = virt_to_page(vcpu->run); 3838 #ifdef CONFIG_X86 3839 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3840 page = virt_to_page(vcpu->arch.pio_data); 3841 #endif 3842 #ifdef CONFIG_KVM_MMIO 3843 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3844 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3845 #endif 3846 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3847 page = kvm_dirty_ring_get_page( 3848 &vcpu->dirty_ring, 3849 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3850 else 3851 return kvm_arch_vcpu_fault(vcpu, vmf); 3852 get_page(page); 3853 vmf->page = page; 3854 return 0; 3855 } 3856 3857 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3858 .fault = kvm_vcpu_fault, 3859 }; 3860 3861 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3862 { 3863 struct kvm_vcpu *vcpu = file->private_data; 3864 unsigned long pages = vma_pages(vma); 3865 3866 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3867 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3868 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3869 return -EINVAL; 3870 3871 vma->vm_ops = &kvm_vcpu_vm_ops; 3872 return 0; 3873 } 3874 3875 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3876 { 3877 struct kvm_vcpu *vcpu = filp->private_data; 3878 3879 kvm_put_kvm(vcpu->kvm); 3880 return 0; 3881 } 3882 3883 static struct file_operations kvm_vcpu_fops = { 3884 .release = kvm_vcpu_release, 3885 .unlocked_ioctl = kvm_vcpu_ioctl, 3886 .mmap = kvm_vcpu_mmap, 3887 .llseek = noop_llseek, 3888 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3889 }; 3890 3891 /* 3892 * Allocates an inode for the vcpu. 
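 *
 * Userspace reaches the vCPU through this fd (illustrative sketch; the fds
 * are hypothetical):
 *
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, (unsigned long)vcpu_id);
 *	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpu_fd, 0);
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).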
3893 */ 3894 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3895 { 3896 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3897 3898 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3899 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3900 } 3901 3902 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3903 static int vcpu_get_pid(void *data, u64 *val) 3904 { 3905 struct kvm_vcpu *vcpu = data; 3906 3907 rcu_read_lock(); 3908 *val = pid_nr(rcu_dereference(vcpu->pid)); 3909 rcu_read_unlock(); 3910 return 0; 3911 } 3912 3913 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n"); 3914 3915 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3916 { 3917 struct dentry *debugfs_dentry; 3918 char dir_name[ITOA_MAX_LEN * 2]; 3919 3920 if (!debugfs_initialized()) 3921 return; 3922 3923 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3924 debugfs_dentry = debugfs_create_dir(dir_name, 3925 vcpu->kvm->debugfs_dentry); 3926 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, 3927 &vcpu_get_pid_fops); 3928 3929 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3930 } 3931 #endif 3932 3933 /* 3934 * Creates some virtual cpus. Good luck creating more than one. 3935 */ 3936 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3937 { 3938 int r; 3939 struct kvm_vcpu *vcpu; 3940 struct page *page; 3941 3942 if (id >= KVM_MAX_VCPU_IDS) 3943 return -EINVAL; 3944 3945 mutex_lock(&kvm->lock); 3946 if (kvm->created_vcpus >= kvm->max_vcpus) { 3947 mutex_unlock(&kvm->lock); 3948 return -EINVAL; 3949 } 3950 3951 r = kvm_arch_vcpu_precreate(kvm, id); 3952 if (r) { 3953 mutex_unlock(&kvm->lock); 3954 return r; 3955 } 3956 3957 kvm->created_vcpus++; 3958 mutex_unlock(&kvm->lock); 3959 3960 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3961 if (!vcpu) { 3962 r = -ENOMEM; 3963 goto vcpu_decrement; 3964 } 3965 3966 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3967 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3968 if (!page) { 3969 r = -ENOMEM; 3970 goto vcpu_free; 3971 } 3972 vcpu->run = page_address(page); 3973 3974 kvm_vcpu_init(vcpu, kvm, id); 3975 3976 r = kvm_arch_vcpu_create(vcpu); 3977 if (r) 3978 goto vcpu_free_run_page; 3979 3980 if (kvm->dirty_ring_size) { 3981 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3982 id, kvm->dirty_ring_size); 3983 if (r) 3984 goto arch_vcpu_destroy; 3985 } 3986 3987 mutex_lock(&kvm->lock); 3988 3989 #ifdef CONFIG_LOCKDEP 3990 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */ 3991 mutex_lock(&vcpu->mutex); 3992 mutex_unlock(&vcpu->mutex); 3993 #endif 3994 3995 if (kvm_get_vcpu_by_id(kvm, id)) { 3996 r = -EEXIST; 3997 goto unlock_vcpu_destroy; 3998 } 3999 4000 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 4001 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT); 4002 if (r) 4003 goto unlock_vcpu_destroy; 4004 4005 /* Now it's all set up, let userspace reach it */ 4006 kvm_get_kvm(kvm); 4007 r = create_vcpu_fd(vcpu); 4008 if (r < 0) 4009 goto kvm_put_xa_release; 4010 4011 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) { 4012 r = -EINVAL; 4013 goto kvm_put_xa_release; 4014 } 4015 4016 /* 4017 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 4018 * pointer before kvm->online_vcpu's incremented value. 
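* This way, a reader that observes the incremented online_vcpus count is
* guaranteed to also observe the vcpu pointer stored in vcpu_array, i.e.
* kvm_get_vcpu() cannot return NULL for an index below the count it
* observed.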
4019 */ 4020 smp_wmb(); 4021 atomic_inc(&kvm->online_vcpus); 4022 4023 mutex_unlock(&kvm->lock); 4024 kvm_arch_vcpu_postcreate(vcpu); 4025 kvm_create_vcpu_debugfs(vcpu); 4026 return r; 4027 4028 kvm_put_xa_release: 4029 kvm_put_kvm_no_destroy(kvm); 4030 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx); 4031 unlock_vcpu_destroy: 4032 mutex_unlock(&kvm->lock); 4033 kvm_dirty_ring_free(&vcpu->dirty_ring); 4034 arch_vcpu_destroy: 4035 kvm_arch_vcpu_destroy(vcpu); 4036 vcpu_free_run_page: 4037 free_page((unsigned long)vcpu->run); 4038 vcpu_free: 4039 kmem_cache_free(kvm_vcpu_cache, vcpu); 4040 vcpu_decrement: 4041 mutex_lock(&kvm->lock); 4042 kvm->created_vcpus--; 4043 mutex_unlock(&kvm->lock); 4044 return r; 4045 } 4046 4047 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 4048 { 4049 if (sigset) { 4050 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 4051 vcpu->sigset_active = 1; 4052 vcpu->sigset = *sigset; 4053 } else 4054 vcpu->sigset_active = 0; 4055 return 0; 4056 } 4057 4058 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 4059 size_t size, loff_t *offset) 4060 { 4061 struct kvm_vcpu *vcpu = file->private_data; 4062 4063 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 4064 &kvm_vcpu_stats_desc[0], &vcpu->stat, 4065 sizeof(vcpu->stat), user_buffer, size, offset); 4066 } 4067 4068 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file) 4069 { 4070 struct kvm_vcpu *vcpu = file->private_data; 4071 4072 kvm_put_kvm(vcpu->kvm); 4073 return 0; 4074 } 4075 4076 static const struct file_operations kvm_vcpu_stats_fops = { 4077 .owner = THIS_MODULE, 4078 .read = kvm_vcpu_stats_read, 4079 .release = kvm_vcpu_stats_release, 4080 .llseek = noop_llseek, 4081 }; 4082 4083 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 4084 { 4085 int fd; 4086 struct file *file; 4087 char name[15 + ITOA_MAX_LEN + 1]; 4088 4089 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 4090 4091 fd = get_unused_fd_flags(O_CLOEXEC); 4092 if (fd < 0) 4093 return fd; 4094 4095 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 4096 if (IS_ERR(file)) { 4097 put_unused_fd(fd); 4098 return PTR_ERR(file); 4099 } 4100 4101 kvm_get_kvm(vcpu->kvm); 4102 4103 file->f_mode |= FMODE_PREAD; 4104 fd_install(fd, file); 4105 4106 return fd; 4107 } 4108 4109 static long kvm_vcpu_ioctl(struct file *filp, 4110 unsigned int ioctl, unsigned long arg) 4111 { 4112 struct kvm_vcpu *vcpu = filp->private_data; 4113 void __user *argp = (void __user *)arg; 4114 int r; 4115 struct kvm_fpu *fpu = NULL; 4116 struct kvm_sregs *kvm_sregs = NULL; 4117 4118 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4119 return -EIO; 4120 4121 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 4122 return -EINVAL; 4123 4124 /* 4125 * Some architectures have vcpu ioctls that are asynchronous to vcpu 4126 * execution; mutex_lock() would break them. 4127 */ 4128 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 4129 if (r != -ENOIOCTLCMD) 4130 return r; 4131 4132 if (mutex_lock_killable(&vcpu->mutex)) 4133 return -EINTR; 4134 switch (ioctl) { 4135 case KVM_RUN: { 4136 struct pid *oldpid; 4137 r = -EINVAL; 4138 if (arg) 4139 goto out; 4140 oldpid = rcu_access_pointer(vcpu->pid); 4141 if (unlikely(oldpid != task_pid(current))) { 4142 /* The thread running this VCPU changed. 
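* Point vcpu->pid at the new thread's pid; the synchronize_rcu() below
* guarantees no RCU reader still sees the old struct pid by the time it
* is put.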
*/ 4143 struct pid *newpid; 4144 4145 r = kvm_arch_vcpu_run_pid_change(vcpu); 4146 if (r) 4147 break; 4148 4149 newpid = get_task_pid(current, PIDTYPE_PID); 4150 rcu_assign_pointer(vcpu->pid, newpid); 4151 if (oldpid) 4152 synchronize_rcu(); 4153 put_pid(oldpid); 4154 } 4155 r = kvm_arch_vcpu_ioctl_run(vcpu); 4156 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 4157 break; 4158 } 4159 case KVM_GET_REGS: { 4160 struct kvm_regs *kvm_regs; 4161 4162 r = -ENOMEM; 4163 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 4164 if (!kvm_regs) 4165 goto out; 4166 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 4167 if (r) 4168 goto out_free1; 4169 r = -EFAULT; 4170 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 4171 goto out_free1; 4172 r = 0; 4173 out_free1: 4174 kfree(kvm_regs); 4175 break; 4176 } 4177 case KVM_SET_REGS: { 4178 struct kvm_regs *kvm_regs; 4179 4180 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 4181 if (IS_ERR(kvm_regs)) { 4182 r = PTR_ERR(kvm_regs); 4183 goto out; 4184 } 4185 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 4186 kfree(kvm_regs); 4187 break; 4188 } 4189 case KVM_GET_SREGS: { 4190 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 4191 GFP_KERNEL_ACCOUNT); 4192 r = -ENOMEM; 4193 if (!kvm_sregs) 4194 goto out; 4195 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 4196 if (r) 4197 goto out; 4198 r = -EFAULT; 4199 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 4200 goto out; 4201 r = 0; 4202 break; 4203 } 4204 case KVM_SET_SREGS: { 4205 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 4206 if (IS_ERR(kvm_sregs)) { 4207 r = PTR_ERR(kvm_sregs); 4208 kvm_sregs = NULL; 4209 goto out; 4210 } 4211 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 4212 break; 4213 } 4214 case KVM_GET_MP_STATE: { 4215 struct kvm_mp_state mp_state; 4216 4217 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 4218 if (r) 4219 goto out; 4220 r = -EFAULT; 4221 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 4222 goto out; 4223 r = 0; 4224 break; 4225 } 4226 case KVM_SET_MP_STATE: { 4227 struct kvm_mp_state mp_state; 4228 4229 r = -EFAULT; 4230 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 4231 goto out; 4232 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 4233 break; 4234 } 4235 case KVM_TRANSLATE: { 4236 struct kvm_translation tr; 4237 4238 r = -EFAULT; 4239 if (copy_from_user(&tr, argp, sizeof(tr))) 4240 goto out; 4241 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 4242 if (r) 4243 goto out; 4244 r = -EFAULT; 4245 if (copy_to_user(argp, &tr, sizeof(tr))) 4246 goto out; 4247 r = 0; 4248 break; 4249 } 4250 case KVM_SET_GUEST_DEBUG: { 4251 struct kvm_guest_debug dbg; 4252 4253 r = -EFAULT; 4254 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4255 goto out; 4256 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4257 break; 4258 } 4259 case KVM_SET_SIGNAL_MASK: { 4260 struct kvm_signal_mask __user *sigmask_arg = argp; 4261 struct kvm_signal_mask kvm_sigmask; 4262 sigset_t sigset, *p; 4263 4264 p = NULL; 4265 if (argp) { 4266 r = -EFAULT; 4267 if (copy_from_user(&kvm_sigmask, argp, 4268 sizeof(kvm_sigmask))) 4269 goto out; 4270 r = -EINVAL; 4271 if (kvm_sigmask.len != sizeof(sigset)) 4272 goto out; 4273 r = -EFAULT; 4274 if (copy_from_user(&sigset, sigmask_arg->sigset, 4275 sizeof(sigset))) 4276 goto out; 4277 p = &sigset; 4278 } 4279 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4280 break; 4281 } 4282 case KVM_GET_FPU: { 4283 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 4284 r = -ENOMEM; 4285 if (!fpu) 4286 goto out; 
4287 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4288 if (r) 4289 goto out; 4290 r = -EFAULT; 4291 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4292 goto out; 4293 r = 0; 4294 break; 4295 } 4296 case KVM_SET_FPU: { 4297 fpu = memdup_user(argp, sizeof(*fpu)); 4298 if (IS_ERR(fpu)) { 4299 r = PTR_ERR(fpu); 4300 fpu = NULL; 4301 goto out; 4302 } 4303 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4304 break; 4305 } 4306 case KVM_GET_STATS_FD: { 4307 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4308 break; 4309 } 4310 default: 4311 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4312 } 4313 out: 4314 mutex_unlock(&vcpu->mutex); 4315 kfree(fpu); 4316 kfree(kvm_sregs); 4317 return r; 4318 } 4319 4320 #ifdef CONFIG_KVM_COMPAT 4321 static long kvm_vcpu_compat_ioctl(struct file *filp, 4322 unsigned int ioctl, unsigned long arg) 4323 { 4324 struct kvm_vcpu *vcpu = filp->private_data; 4325 void __user *argp = compat_ptr(arg); 4326 int r; 4327 4328 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4329 return -EIO; 4330 4331 switch (ioctl) { 4332 case KVM_SET_SIGNAL_MASK: { 4333 struct kvm_signal_mask __user *sigmask_arg = argp; 4334 struct kvm_signal_mask kvm_sigmask; 4335 sigset_t sigset; 4336 4337 if (argp) { 4338 r = -EFAULT; 4339 if (copy_from_user(&kvm_sigmask, argp, 4340 sizeof(kvm_sigmask))) 4341 goto out; 4342 r = -EINVAL; 4343 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4344 goto out; 4345 r = -EFAULT; 4346 if (get_compat_sigset(&sigset, 4347 (compat_sigset_t __user *)sigmask_arg->sigset)) 4348 goto out; 4349 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4350 } else 4351 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4352 break; 4353 } 4354 default: 4355 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4356 } 4357 4358 out: 4359 return r; 4360 } 4361 #endif 4362 4363 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4364 { 4365 struct kvm_device *dev = filp->private_data; 4366 4367 if (dev->ops->mmap) 4368 return dev->ops->mmap(dev, vma); 4369 4370 return -ENODEV; 4371 } 4372 4373 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4374 int (*accessor)(struct kvm_device *dev, 4375 struct kvm_device_attr *attr), 4376 unsigned long arg) 4377 { 4378 struct kvm_device_attr attr; 4379 4380 if (!accessor) 4381 return -EPERM; 4382 4383 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4384 return -EFAULT; 4385 4386 return accessor(dev, &attr); 4387 } 4388 4389 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4390 unsigned long arg) 4391 { 4392 struct kvm_device *dev = filp->private_data; 4393 4394 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4395 return -EIO; 4396 4397 switch (ioctl) { 4398 case KVM_SET_DEVICE_ATTR: 4399 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4400 case KVM_GET_DEVICE_ATTR: 4401 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4402 case KVM_HAS_DEVICE_ATTR: 4403 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4404 default: 4405 if (dev->ops->ioctl) 4406 return dev->ops->ioctl(dev, ioctl, arg); 4407 4408 return -ENOTTY; 4409 } 4410 } 4411 4412 static int kvm_device_release(struct inode *inode, struct file *filp) 4413 { 4414 struct kvm_device *dev = filp->private_data; 4415 struct kvm *kvm = dev->kvm; 4416 4417 if (dev->ops->release) { 4418 mutex_lock(&kvm->lock); 4419 list_del(&dev->vm_node); 4420 dev->ops->release(dev); 4421 mutex_unlock(&kvm->lock); 4422 } 4423 4424 kvm_put_kvm(kvm); 4425 return 0; 4426 } 4427 4428 static struct file_operations kvm_device_fops = { 4429 
.unlocked_ioctl = kvm_device_ioctl, 4430 .release = kvm_device_release, 4431 KVM_COMPAT(kvm_device_ioctl), 4432 .mmap = kvm_device_mmap, 4433 }; 4434 4435 struct kvm_device *kvm_device_from_filp(struct file *filp) 4436 { 4437 if (filp->f_op != &kvm_device_fops) 4438 return NULL; 4439 4440 return filp->private_data; 4441 } 4442 4443 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4444 #ifdef CONFIG_KVM_MPIC 4445 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4446 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4447 #endif 4448 }; 4449 4450 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4451 { 4452 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4453 return -ENOSPC; 4454 4455 if (kvm_device_ops_table[type] != NULL) 4456 return -EEXIST; 4457 4458 kvm_device_ops_table[type] = ops; 4459 return 0; 4460 } 4461 4462 void kvm_unregister_device_ops(u32 type) 4463 { 4464 if (kvm_device_ops_table[type] != NULL) 4465 kvm_device_ops_table[type] = NULL; 4466 } 4467 4468 static int kvm_ioctl_create_device(struct kvm *kvm, 4469 struct kvm_create_device *cd) 4470 { 4471 const struct kvm_device_ops *ops; 4472 struct kvm_device *dev; 4473 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4474 int type; 4475 int ret; 4476 4477 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4478 return -ENODEV; 4479 4480 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4481 ops = kvm_device_ops_table[type]; 4482 if (ops == NULL) 4483 return -ENODEV; 4484 4485 if (test) 4486 return 0; 4487 4488 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4489 if (!dev) 4490 return -ENOMEM; 4491 4492 dev->ops = ops; 4493 dev->kvm = kvm; 4494 4495 mutex_lock(&kvm->lock); 4496 ret = ops->create(dev, type); 4497 if (ret < 0) { 4498 mutex_unlock(&kvm->lock); 4499 kfree(dev); 4500 return ret; 4501 } 4502 list_add(&dev->vm_node, &kvm->devices); 4503 mutex_unlock(&kvm->lock); 4504 4505 if (ops->init) 4506 ops->init(dev); 4507 4508 kvm_get_kvm(kvm); 4509 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4510 if (ret < 0) { 4511 kvm_put_kvm_no_destroy(kvm); 4512 mutex_lock(&kvm->lock); 4513 list_del(&dev->vm_node); 4514 if (ops->release) 4515 ops->release(dev); 4516 mutex_unlock(&kvm->lock); 4517 if (ops->destroy) 4518 ops->destroy(dev); 4519 return ret; 4520 } 4521 4522 cd->fd = ret; 4523 return 0; 4524 } 4525 4526 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4527 { 4528 switch (arg) { 4529 case KVM_CAP_USER_MEMORY: 4530 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4531 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4532 case KVM_CAP_INTERNAL_ERROR_DATA: 4533 #ifdef CONFIG_HAVE_KVM_MSI 4534 case KVM_CAP_SIGNAL_MSI: 4535 #endif 4536 #ifdef CONFIG_HAVE_KVM_IRQFD 4537 case KVM_CAP_IRQFD: 4538 #endif 4539 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4540 case KVM_CAP_CHECK_EXTENSION_VM: 4541 case KVM_CAP_ENABLE_CAP_VM: 4542 case KVM_CAP_HALT_POLL: 4543 return 1; 4544 #ifdef CONFIG_KVM_MMIO 4545 case KVM_CAP_COALESCED_MMIO: 4546 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4547 case KVM_CAP_COALESCED_PIO: 4548 return 1; 4549 #endif 4550 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4551 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4552 return KVM_DIRTY_LOG_MANUAL_CAPS; 4553 #endif 4554 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4555 case KVM_CAP_IRQ_ROUTING: 4556 return KVM_MAX_IRQ_ROUTES; 4557 #endif 4558 #if KVM_ADDRESS_SPACE_NUM > 1 4559 case KVM_CAP_MULTI_ADDRESS_SPACE: 4560 return KVM_ADDRESS_SPACE_NUM; 4561 #endif 4562 case KVM_CAP_NR_MEMSLOTS: 4563 return 
KVM_USER_MEM_SLOTS;
4564 case KVM_CAP_DIRTY_LOG_RING:
4565 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4566 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4567 #else
4568 return 0;
4569 #endif
4570 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4571 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4572 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4573 #else
4574 return 0;
4575 #endif
4576 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4577 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4578 #endif
4579 case KVM_CAP_BINARY_STATS_FD:
4580 case KVM_CAP_SYSTEM_EVENT_DATA:
4581 return 1;
4582 default:
4583 break;
4584 }
4585 return kvm_vm_ioctl_check_extension(kvm, arg);
4586 }
4587
4588 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4589 {
4590 int r;
4591
4592 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4593 return -EINVAL;
4594
4595 /* The size must be a power of 2. */
4596 if (!size || (size & (size - 1)))
4597 return -EINVAL;
4598
4599 /* The size must hold the reserved entries and be at least one page. */
4600 if (size < kvm_dirty_ring_get_rsvd_entries() *
4601 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4602 return -EINVAL;
4603
4604 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4605 sizeof(struct kvm_dirty_gfn))
4606 return -E2BIG;
4607
4608 /* The ring size can only be set once. */
4609 if (kvm->dirty_ring_size)
4610 return -EINVAL;
4611
4612 mutex_lock(&kvm->lock);
4613
4614 if (kvm->created_vcpus) {
4615 /* Changing the size after vCPUs have been created is not allowed. */
4616 r = -EINVAL;
4617 } else {
4618 kvm->dirty_ring_size = size;
4619 r = 0;
4620 }
4621
4622 mutex_unlock(&kvm->lock);
4623 return r;
4624 }
4625
4626 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4627 {
4628 unsigned long i;
4629 struct kvm_vcpu *vcpu;
4630 int cleared = 0;
4631
4632 if (!kvm->dirty_ring_size)
4633 return -EINVAL;
4634
4635 mutex_lock(&kvm->slots_lock);
4636
4637 kvm_for_each_vcpu(i, vcpu, kvm)
4638 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4639
4640 mutex_unlock(&kvm->slots_lock);
4641
4642 if (cleared)
4643 kvm_flush_remote_tlbs(kvm);
4644
4645 return cleared;
4646 }
4647
4648 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4649 struct kvm_enable_cap *cap)
4650 {
4651 return -EINVAL;
4652 }
4653
4654 bool kvm_are_all_memslots_empty(struct kvm *kvm)
4655 {
4656 int i;
4657
4658 lockdep_assert_held(&kvm->slots_lock);
4659
4660 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
4661 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4662 return false;
4663 }
4664
4665 return true;
4666 }
4667 EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
4668
4669 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4670 struct kvm_enable_cap *cap)
4671 {
4672 switch (cap->cap) {
4673 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4674 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4675 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4676
4677 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4678 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4679
4680 if (cap->flags || (cap->args[0] & ~allowed_options))
4681 return -EINVAL;
4682 kvm->manual_dirty_log_protect = cap->args[0];
4683 return 0;
4684 }
4685 #endif
4686 case KVM_CAP_HALT_POLL: {
4687 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4688 return -EINVAL;
4689
4690 kvm->max_halt_poll_ns = cap->args[0];
4691
4692 /*
4693 * Ensure kvm->override_halt_poll_ns does not become visible
4694 * before kvm->max_halt_poll_ns.
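* (A vCPU that later observes override_halt_poll_ns == true is thus
* guaranteed to read the VM-scoped max_halt_poll_ns set above rather
* than a stale value.)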
4695 * 4696 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns(). 4697 */ 4698 smp_wmb(); 4699 kvm->override_halt_poll_ns = true; 4700 4701 return 0; 4702 } 4703 case KVM_CAP_DIRTY_LOG_RING: 4704 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 4705 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) 4706 return -EINVAL; 4707 4708 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4709 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: { 4710 int r = -EINVAL; 4711 4712 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) || 4713 !kvm->dirty_ring_size || cap->flags) 4714 return r; 4715 4716 mutex_lock(&kvm->slots_lock); 4717 4718 /* 4719 * For simplicity, allow enabling ring+bitmap if and only if 4720 * there are no memslots, e.g. to ensure all memslots allocate 4721 * a bitmap after the capability is enabled. 4722 */ 4723 if (kvm_are_all_memslots_empty(kvm)) { 4724 kvm->dirty_ring_with_bitmap = true; 4725 r = 0; 4726 } 4727 4728 mutex_unlock(&kvm->slots_lock); 4729 4730 return r; 4731 } 4732 default: 4733 return kvm_vm_ioctl_enable_cap(kvm, cap); 4734 } 4735 } 4736 4737 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4738 size_t size, loff_t *offset) 4739 { 4740 struct kvm *kvm = file->private_data; 4741 4742 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4743 &kvm_vm_stats_desc[0], &kvm->stat, 4744 sizeof(kvm->stat), user_buffer, size, offset); 4745 } 4746 4747 static int kvm_vm_stats_release(struct inode *inode, struct file *file) 4748 { 4749 struct kvm *kvm = file->private_data; 4750 4751 kvm_put_kvm(kvm); 4752 return 0; 4753 } 4754 4755 static const struct file_operations kvm_vm_stats_fops = { 4756 .owner = THIS_MODULE, 4757 .read = kvm_vm_stats_read, 4758 .release = kvm_vm_stats_release, 4759 .llseek = noop_llseek, 4760 }; 4761 4762 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4763 { 4764 int fd; 4765 struct file *file; 4766 4767 fd = get_unused_fd_flags(O_CLOEXEC); 4768 if (fd < 0) 4769 return fd; 4770 4771 file = anon_inode_getfile("kvm-vm-stats", 4772 &kvm_vm_stats_fops, kvm, O_RDONLY); 4773 if (IS_ERR(file)) { 4774 put_unused_fd(fd); 4775 return PTR_ERR(file); 4776 } 4777 4778 kvm_get_kvm(kvm); 4779 4780 file->f_mode |= FMODE_PREAD; 4781 fd_install(fd, file); 4782 4783 return fd; 4784 } 4785 4786 static long kvm_vm_ioctl(struct file *filp, 4787 unsigned int ioctl, unsigned long arg) 4788 { 4789 struct kvm *kvm = filp->private_data; 4790 void __user *argp = (void __user *)arg; 4791 int r; 4792 4793 if (kvm->mm != current->mm || kvm->vm_dead) 4794 return -EIO; 4795 switch (ioctl) { 4796 case KVM_CREATE_VCPU: 4797 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4798 break; 4799 case KVM_ENABLE_CAP: { 4800 struct kvm_enable_cap cap; 4801 4802 r = -EFAULT; 4803 if (copy_from_user(&cap, argp, sizeof(cap))) 4804 goto out; 4805 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4806 break; 4807 } 4808 case KVM_SET_USER_MEMORY_REGION: { 4809 struct kvm_userspace_memory_region kvm_userspace_mem; 4810 4811 r = -EFAULT; 4812 if (copy_from_user(&kvm_userspace_mem, argp, 4813 sizeof(kvm_userspace_mem))) 4814 goto out; 4815 4816 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4817 break; 4818 } 4819 case KVM_GET_DIRTY_LOG: { 4820 struct kvm_dirty_log log; 4821 4822 r = -EFAULT; 4823 if (copy_from_user(&log, argp, sizeof(log))) 4824 goto out; 4825 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4826 break; 4827 } 4828 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4829 case KVM_CLEAR_DIRTY_LOG: { 4830 struct kvm_clear_dirty_log log; 4831 4832 r 
= -EFAULT; 4833 if (copy_from_user(&log, argp, sizeof(log))) 4834 goto out; 4835 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4836 break; 4837 } 4838 #endif 4839 #ifdef CONFIG_KVM_MMIO 4840 case KVM_REGISTER_COALESCED_MMIO: { 4841 struct kvm_coalesced_mmio_zone zone; 4842 4843 r = -EFAULT; 4844 if (copy_from_user(&zone, argp, sizeof(zone))) 4845 goto out; 4846 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4847 break; 4848 } 4849 case KVM_UNREGISTER_COALESCED_MMIO: { 4850 struct kvm_coalesced_mmio_zone zone; 4851 4852 r = -EFAULT; 4853 if (copy_from_user(&zone, argp, sizeof(zone))) 4854 goto out; 4855 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4856 break; 4857 } 4858 #endif 4859 case KVM_IRQFD: { 4860 struct kvm_irqfd data; 4861 4862 r = -EFAULT; 4863 if (copy_from_user(&data, argp, sizeof(data))) 4864 goto out; 4865 r = kvm_irqfd(kvm, &data); 4866 break; 4867 } 4868 case KVM_IOEVENTFD: { 4869 struct kvm_ioeventfd data; 4870 4871 r = -EFAULT; 4872 if (copy_from_user(&data, argp, sizeof(data))) 4873 goto out; 4874 r = kvm_ioeventfd(kvm, &data); 4875 break; 4876 } 4877 #ifdef CONFIG_HAVE_KVM_MSI 4878 case KVM_SIGNAL_MSI: { 4879 struct kvm_msi msi; 4880 4881 r = -EFAULT; 4882 if (copy_from_user(&msi, argp, sizeof(msi))) 4883 goto out; 4884 r = kvm_send_userspace_msi(kvm, &msi); 4885 break; 4886 } 4887 #endif 4888 #ifdef __KVM_HAVE_IRQ_LINE 4889 case KVM_IRQ_LINE_STATUS: 4890 case KVM_IRQ_LINE: { 4891 struct kvm_irq_level irq_event; 4892 4893 r = -EFAULT; 4894 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4895 goto out; 4896 4897 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4898 ioctl == KVM_IRQ_LINE_STATUS); 4899 if (r) 4900 goto out; 4901 4902 r = -EFAULT; 4903 if (ioctl == KVM_IRQ_LINE_STATUS) { 4904 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4905 goto out; 4906 } 4907 4908 r = 0; 4909 break; 4910 } 4911 #endif 4912 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4913 case KVM_SET_GSI_ROUTING: { 4914 struct kvm_irq_routing routing; 4915 struct kvm_irq_routing __user *urouting; 4916 struct kvm_irq_routing_entry *entries = NULL; 4917 4918 r = -EFAULT; 4919 if (copy_from_user(&routing, argp, sizeof(routing))) 4920 goto out; 4921 r = -EINVAL; 4922 if (!kvm_arch_can_set_irq_routing(kvm)) 4923 goto out; 4924 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4925 goto out; 4926 if (routing.flags) 4927 goto out; 4928 if (routing.nr) { 4929 urouting = argp; 4930 entries = vmemdup_user(urouting->entries, 4931 array_size(sizeof(*entries), 4932 routing.nr)); 4933 if (IS_ERR(entries)) { 4934 r = PTR_ERR(entries); 4935 goto out; 4936 } 4937 } 4938 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4939 routing.flags); 4940 kvfree(entries); 4941 break; 4942 } 4943 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4944 case KVM_CREATE_DEVICE: { 4945 struct kvm_create_device cd; 4946 4947 r = -EFAULT; 4948 if (copy_from_user(&cd, argp, sizeof(cd))) 4949 goto out; 4950 4951 r = kvm_ioctl_create_device(kvm, &cd); 4952 if (r) 4953 goto out; 4954 4955 r = -EFAULT; 4956 if (copy_to_user(argp, &cd, sizeof(cd))) 4957 goto out; 4958 4959 r = 0; 4960 break; 4961 } 4962 case KVM_CHECK_EXTENSION: 4963 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4964 break; 4965 case KVM_RESET_DIRTY_RINGS: 4966 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4967 break; 4968 case KVM_GET_STATS_FD: 4969 r = kvm_vm_ioctl_get_stats_fd(kvm); 4970 break; 4971 default: 4972 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4973 } 4974 out: 4975 return r; 4976 } 4977 4978 #ifdef CONFIG_KVM_COMPAT 4979 struct compat_kvm_dirty_log { 4980 __u32 
slot; 4981 __u32 padding1; 4982 union { 4983 compat_uptr_t dirty_bitmap; /* one bit per page */ 4984 __u64 padding2; 4985 }; 4986 }; 4987 4988 struct compat_kvm_clear_dirty_log { 4989 __u32 slot; 4990 __u32 num_pages; 4991 __u64 first_page; 4992 union { 4993 compat_uptr_t dirty_bitmap; /* one bit per page */ 4994 __u64 padding2; 4995 }; 4996 }; 4997 4998 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 4999 unsigned long arg) 5000 { 5001 return -ENOTTY; 5002 } 5003 5004 static long kvm_vm_compat_ioctl(struct file *filp, 5005 unsigned int ioctl, unsigned long arg) 5006 { 5007 struct kvm *kvm = filp->private_data; 5008 int r; 5009 5010 if (kvm->mm != current->mm || kvm->vm_dead) 5011 return -EIO; 5012 5013 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg); 5014 if (r != -ENOTTY) 5015 return r; 5016 5017 switch (ioctl) { 5018 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 5019 case KVM_CLEAR_DIRTY_LOG: { 5020 struct compat_kvm_clear_dirty_log compat_log; 5021 struct kvm_clear_dirty_log log; 5022 5023 if (copy_from_user(&compat_log, (void __user *)arg, 5024 sizeof(compat_log))) 5025 return -EFAULT; 5026 log.slot = compat_log.slot; 5027 log.num_pages = compat_log.num_pages; 5028 log.first_page = compat_log.first_page; 5029 log.padding2 = compat_log.padding2; 5030 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 5031 5032 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 5033 break; 5034 } 5035 #endif 5036 case KVM_GET_DIRTY_LOG: { 5037 struct compat_kvm_dirty_log compat_log; 5038 struct kvm_dirty_log log; 5039 5040 if (copy_from_user(&compat_log, (void __user *)arg, 5041 sizeof(compat_log))) 5042 return -EFAULT; 5043 log.slot = compat_log.slot; 5044 log.padding1 = compat_log.padding1; 5045 log.padding2 = compat_log.padding2; 5046 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 5047 5048 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 5049 break; 5050 } 5051 default: 5052 r = kvm_vm_ioctl(filp, ioctl, arg); 5053 } 5054 return r; 5055 } 5056 #endif 5057 5058 static struct file_operations kvm_vm_fops = { 5059 .release = kvm_vm_release, 5060 .unlocked_ioctl = kvm_vm_ioctl, 5061 .llseek = noop_llseek, 5062 KVM_COMPAT(kvm_vm_compat_ioctl), 5063 }; 5064 5065 bool file_is_kvm(struct file *file) 5066 { 5067 return file && file->f_op == &kvm_vm_fops; 5068 } 5069 EXPORT_SYMBOL_GPL(file_is_kvm); 5070 5071 static int kvm_dev_ioctl_create_vm(unsigned long type) 5072 { 5073 char fdname[ITOA_MAX_LEN + 1]; 5074 int r, fd; 5075 struct kvm *kvm; 5076 struct file *file; 5077 5078 fd = get_unused_fd_flags(O_CLOEXEC); 5079 if (fd < 0) 5080 return fd; 5081 5082 snprintf(fdname, sizeof(fdname), "%d", fd); 5083 5084 kvm = kvm_create_vm(type, fdname); 5085 if (IS_ERR(kvm)) { 5086 r = PTR_ERR(kvm); 5087 goto put_fd; 5088 } 5089 5090 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 5091 if (IS_ERR(file)) { 5092 r = PTR_ERR(file); 5093 goto put_kvm; 5094 } 5095 5096 /* 5097 * Don't call kvm_put_kvm anymore at this point; file->f_op is 5098 * already set, with ->release() being kvm_vm_release(). In error 5099 * cases it will be called by the final fput(file) and will take 5100 * care of doing kvm_put_kvm(kvm). 
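* (Consequently, the put_kvm error label below is only reachable for
* failures that occur before the file has been created.)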
5101 */ 5102 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 5103 5104 fd_install(fd, file); 5105 return fd; 5106 5107 put_kvm: 5108 kvm_put_kvm(kvm); 5109 put_fd: 5110 put_unused_fd(fd); 5111 return r; 5112 } 5113 5114 static long kvm_dev_ioctl(struct file *filp, 5115 unsigned int ioctl, unsigned long arg) 5116 { 5117 int r = -EINVAL; 5118 5119 switch (ioctl) { 5120 case KVM_GET_API_VERSION: 5121 if (arg) 5122 goto out; 5123 r = KVM_API_VERSION; 5124 break; 5125 case KVM_CREATE_VM: 5126 r = kvm_dev_ioctl_create_vm(arg); 5127 break; 5128 case KVM_CHECK_EXTENSION: 5129 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 5130 break; 5131 case KVM_GET_VCPU_MMAP_SIZE: 5132 if (arg) 5133 goto out; 5134 r = PAGE_SIZE; /* struct kvm_run */ 5135 #ifdef CONFIG_X86 5136 r += PAGE_SIZE; /* pio data page */ 5137 #endif 5138 #ifdef CONFIG_KVM_MMIO 5139 r += PAGE_SIZE; /* coalesced mmio ring page */ 5140 #endif 5141 break; 5142 case KVM_TRACE_ENABLE: 5143 case KVM_TRACE_PAUSE: 5144 case KVM_TRACE_DISABLE: 5145 r = -EOPNOTSUPP; 5146 break; 5147 default: 5148 return kvm_arch_dev_ioctl(filp, ioctl, arg); 5149 } 5150 out: 5151 return r; 5152 } 5153 5154 static struct file_operations kvm_chardev_ops = { 5155 .unlocked_ioctl = kvm_dev_ioctl, 5156 .llseek = noop_llseek, 5157 KVM_COMPAT(kvm_dev_ioctl), 5158 }; 5159 5160 static struct miscdevice kvm_dev = { 5161 KVM_MINOR, 5162 "kvm", 5163 &kvm_chardev_ops, 5164 }; 5165 5166 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 5167 __visible bool kvm_rebooting; 5168 EXPORT_SYMBOL_GPL(kvm_rebooting); 5169 5170 static DEFINE_PER_CPU(bool, hardware_enabled); 5171 static int kvm_usage_count; 5172 5173 static int __hardware_enable_nolock(void) 5174 { 5175 if (__this_cpu_read(hardware_enabled)) 5176 return 0; 5177 5178 if (kvm_arch_hardware_enable()) { 5179 pr_info("kvm: enabling virtualization on CPU%d failed\n", 5180 raw_smp_processor_id()); 5181 return -EIO; 5182 } 5183 5184 __this_cpu_write(hardware_enabled, true); 5185 return 0; 5186 } 5187 5188 static void hardware_enable_nolock(void *failed) 5189 { 5190 if (__hardware_enable_nolock()) 5191 atomic_inc(failed); 5192 } 5193 5194 static int kvm_online_cpu(unsigned int cpu) 5195 { 5196 int ret = 0; 5197 5198 /* 5199 * Abort the CPU online process if hardware virtualization cannot 5200 * be enabled. Otherwise running VMs would encounter unrecoverable 5201 * errors when scheduled to this CPU. 5202 */ 5203 mutex_lock(&kvm_lock); 5204 if (kvm_usage_count) 5205 ret = __hardware_enable_nolock(); 5206 mutex_unlock(&kvm_lock); 5207 return ret; 5208 } 5209 5210 static void hardware_disable_nolock(void *junk) 5211 { 5212 /* 5213 * Note, hardware_disable_all_nolock() tells all online CPUs to disable 5214 * hardware, not just CPUs that successfully enabled hardware! 
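* The per-CPU hardware_enabled check below makes that safe: CPUs that
* never enabled virtualization simply return without calling
* kvm_arch_hardware_disable().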
5215 */ 5216 if (!__this_cpu_read(hardware_enabled)) 5217 return; 5218 5219 kvm_arch_hardware_disable(); 5220 5221 __this_cpu_write(hardware_enabled, false); 5222 } 5223 5224 static int kvm_offline_cpu(unsigned int cpu) 5225 { 5226 mutex_lock(&kvm_lock); 5227 if (kvm_usage_count) 5228 hardware_disable_nolock(NULL); 5229 mutex_unlock(&kvm_lock); 5230 return 0; 5231 } 5232 5233 static void hardware_disable_all_nolock(void) 5234 { 5235 BUG_ON(!kvm_usage_count); 5236 5237 kvm_usage_count--; 5238 if (!kvm_usage_count) 5239 on_each_cpu(hardware_disable_nolock, NULL, 1); 5240 } 5241 5242 static void hardware_disable_all(void) 5243 { 5244 cpus_read_lock(); 5245 mutex_lock(&kvm_lock); 5246 hardware_disable_all_nolock(); 5247 mutex_unlock(&kvm_lock); 5248 cpus_read_unlock(); 5249 } 5250 5251 static int hardware_enable_all(void) 5252 { 5253 atomic_t failed = ATOMIC_INIT(0); 5254 int r; 5255 5256 /* 5257 * Do not enable hardware virtualization if the system is going down. 5258 * If userspace initiated a forced reboot, e.g. reboot -f, then it's 5259 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling 5260 * after kvm_reboot() is called. Note, this relies on system_state 5261 * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops 5262 * hook instead of registering a dedicated reboot notifier (the latter 5263 * runs before system_state is updated). 5264 */ 5265 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF || 5266 system_state == SYSTEM_RESTART) 5267 return -EBUSY; 5268 5269 /* 5270 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu() 5271 * is called, and so on_each_cpu() between them includes the CPU that 5272 * is being onlined. As a result, hardware_enable_nolock() may get 5273 * invoked before kvm_online_cpu(), which also enables hardware if the 5274 * usage count is non-zero. Disable CPU hotplug to avoid attempting to 5275 * enable hardware multiple times. 5276 */ 5277 cpus_read_lock(); 5278 mutex_lock(&kvm_lock); 5279 5280 r = 0; 5281 5282 kvm_usage_count++; 5283 if (kvm_usage_count == 1) { 5284 on_each_cpu(hardware_enable_nolock, &failed, 1); 5285 5286 if (atomic_read(&failed)) { 5287 hardware_disable_all_nolock(); 5288 r = -EBUSY; 5289 } 5290 } 5291 5292 mutex_unlock(&kvm_lock); 5293 cpus_read_unlock(); 5294 5295 return r; 5296 } 5297 5298 static void kvm_shutdown(void) 5299 { 5300 /* 5301 * Disable hardware virtualization and set kvm_rebooting to indicate 5302 * that KVM has asynchronously disabled hardware virtualization, i.e. 5303 * that relevant errors and exceptions aren't entirely unexpected. 5304 * Some flavors of hardware virtualization need to be disabled before 5305 * transferring control to firmware (to perform shutdown/reboot), e.g. 5306 * on x86, virtualization can block INIT interrupts, which are used by 5307 * firmware to pull APs back under firmware control. Note, this path 5308 * is used for both shutdown and reboot scenarios, i.e. neither name is 5309 * 100% comprehensive. 5310 */ 5311 pr_info("kvm: exiting hardware virtualization\n"); 5312 kvm_rebooting = true; 5313 on_each_cpu(hardware_disable_nolock, NULL, 1); 5314 } 5315 5316 static int kvm_suspend(void) 5317 { 5318 /* 5319 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume 5320 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count 5321 * is stable. Assert that kvm_lock is not held to ensure the system 5322 * isn't suspended while KVM is enabling hardware. 
Hardware enabling 5323 * can be preempted, but the task cannot be frozen until it has dropped 5324 * all locks (userspace tasks are frozen via a fake signal). 5325 */ 5326 lockdep_assert_not_held(&kvm_lock); 5327 lockdep_assert_irqs_disabled(); 5328 5329 if (kvm_usage_count) 5330 hardware_disable_nolock(NULL); 5331 return 0; 5332 } 5333 5334 static void kvm_resume(void) 5335 { 5336 lockdep_assert_not_held(&kvm_lock); 5337 lockdep_assert_irqs_disabled(); 5338 5339 if (kvm_usage_count) 5340 WARN_ON_ONCE(__hardware_enable_nolock()); 5341 } 5342 5343 static struct syscore_ops kvm_syscore_ops = { 5344 .suspend = kvm_suspend, 5345 .resume = kvm_resume, 5346 .shutdown = kvm_shutdown, 5347 }; 5348 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5349 static int hardware_enable_all(void) 5350 { 5351 return 0; 5352 } 5353 5354 static void hardware_disable_all(void) 5355 { 5356 5357 } 5358 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5359 5360 static void kvm_iodevice_destructor(struct kvm_io_device *dev) 5361 { 5362 if (dev->ops->destructor) 5363 dev->ops->destructor(dev); 5364 } 5365 5366 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 5367 { 5368 int i; 5369 5370 for (i = 0; i < bus->dev_count; i++) { 5371 struct kvm_io_device *pos = bus->range[i].dev; 5372 5373 kvm_iodevice_destructor(pos); 5374 } 5375 kfree(bus); 5376 } 5377 5378 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 5379 const struct kvm_io_range *r2) 5380 { 5381 gpa_t addr1 = r1->addr; 5382 gpa_t addr2 = r2->addr; 5383 5384 if (addr1 < addr2) 5385 return -1; 5386 5387 /* If r2->len == 0, match the exact address. If r2->len != 0, 5388 * accept any overlapping write. Any order is acceptable for 5389 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 5390 * we process all of them. 
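* Concretely: with r2->len != 0, a key r1 compares equal to r2 when
* [r1->addr, r1->addr + r1->len) lies entirely within
* [r2->addr, r2->addr + r2->len).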
5391 */ 5392 if (r2->len) { 5393 addr1 += r1->len; 5394 addr2 += r2->len; 5395 } 5396 5397 if (addr1 > addr2) 5398 return 1; 5399 5400 return 0; 5401 } 5402 5403 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 5404 { 5405 return kvm_io_bus_cmp(p1, p2); 5406 } 5407 5408 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 5409 gpa_t addr, int len) 5410 { 5411 struct kvm_io_range *range, key; 5412 int off; 5413 5414 key = (struct kvm_io_range) { 5415 .addr = addr, 5416 .len = len, 5417 }; 5418 5419 range = bsearch(&key, bus->range, bus->dev_count, 5420 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 5421 if (range == NULL) 5422 return -ENOENT; 5423 5424 off = range - bus->range; 5425 5426 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5427 off--; 5428 5429 return off; 5430 } 5431 5432 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5433 struct kvm_io_range *range, const void *val) 5434 { 5435 int idx; 5436 5437 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5438 if (idx < 0) 5439 return -EOPNOTSUPP; 5440 5441 while (idx < bus->dev_count && 5442 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5443 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5444 range->len, val)) 5445 return idx; 5446 idx++; 5447 } 5448 5449 return -EOPNOTSUPP; 5450 } 5451 5452 /* kvm_io_bus_write - called under kvm->slots_lock */ 5453 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5454 int len, const void *val) 5455 { 5456 struct kvm_io_bus *bus; 5457 struct kvm_io_range range; 5458 int r; 5459 5460 range = (struct kvm_io_range) { 5461 .addr = addr, 5462 .len = len, 5463 }; 5464 5465 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5466 if (!bus) 5467 return -ENOMEM; 5468 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5469 return r < 0 ? r : 0; 5470 } 5471 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5472 5473 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 5474 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5475 gpa_t addr, int len, const void *val, long cookie) 5476 { 5477 struct kvm_io_bus *bus; 5478 struct kvm_io_range range; 5479 5480 range = (struct kvm_io_range) { 5481 .addr = addr, 5482 .len = len, 5483 }; 5484 5485 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5486 if (!bus) 5487 return -ENOMEM; 5488 5489 /* First try the device referenced by cookie. */ 5490 if ((cookie >= 0) && (cookie < bus->dev_count) && 5491 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5492 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5493 val)) 5494 return cookie; 5495 5496 /* 5497 * cookie contained garbage; fall back to search and return the 5498 * correct cookie value. 
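* (On success, __kvm_io_bus_write() returns the matching device's index,
* which the caller can cache as the cookie for subsequent writes to the
* same range.)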
5499 */ 5500 return __kvm_io_bus_write(vcpu, bus, &range, val); 5501 } 5502 5503 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5504 struct kvm_io_range *range, void *val) 5505 { 5506 int idx; 5507 5508 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5509 if (idx < 0) 5510 return -EOPNOTSUPP; 5511 5512 while (idx < bus->dev_count && 5513 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5514 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5515 range->len, val)) 5516 return idx; 5517 idx++; 5518 } 5519 5520 return -EOPNOTSUPP; 5521 } 5522 5523 /* kvm_io_bus_read - called under kvm->slots_lock */ 5524 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5525 int len, void *val) 5526 { 5527 struct kvm_io_bus *bus; 5528 struct kvm_io_range range; 5529 int r; 5530 5531 range = (struct kvm_io_range) { 5532 .addr = addr, 5533 .len = len, 5534 }; 5535 5536 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5537 if (!bus) 5538 return -ENOMEM; 5539 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5540 return r < 0 ? r : 0; 5541 } 5542 5543 /* Caller must hold slots_lock. */ 5544 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5545 int len, struct kvm_io_device *dev) 5546 { 5547 int i; 5548 struct kvm_io_bus *new_bus, *bus; 5549 struct kvm_io_range range; 5550 5551 bus = kvm_get_bus(kvm, bus_idx); 5552 if (!bus) 5553 return -ENOMEM; 5554 5555 /* exclude ioeventfd which is limited by maximum fd */ 5556 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5557 return -ENOSPC; 5558 5559 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5560 GFP_KERNEL_ACCOUNT); 5561 if (!new_bus) 5562 return -ENOMEM; 5563 5564 range = (struct kvm_io_range) { 5565 .addr = addr, 5566 .len = len, 5567 .dev = dev, 5568 }; 5569 5570 for (i = 0; i < bus->dev_count; i++) 5571 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5572 break; 5573 5574 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5575 new_bus->dev_count++; 5576 new_bus->range[i] = range; 5577 memcpy(new_bus->range + i + 1, bus->range + i, 5578 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5579 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5580 synchronize_srcu_expedited(&kvm->srcu); 5581 kfree(bus); 5582 5583 return 0; 5584 } 5585 5586 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5587 struct kvm_io_device *dev) 5588 { 5589 int i; 5590 struct kvm_io_bus *new_bus, *bus; 5591 5592 lockdep_assert_held(&kvm->slots_lock); 5593 5594 bus = kvm_get_bus(kvm, bus_idx); 5595 if (!bus) 5596 return 0; 5597 5598 for (i = 0; i < bus->dev_count; i++) { 5599 if (bus->range[i].dev == dev) { 5600 break; 5601 } 5602 } 5603 5604 if (i == bus->dev_count) 5605 return 0; 5606 5607 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5608 GFP_KERNEL_ACCOUNT); 5609 if (new_bus) { 5610 memcpy(new_bus, bus, struct_size(bus, range, i)); 5611 new_bus->dev_count--; 5612 memcpy(new_bus->range + i, bus->range + i + 1, 5613 flex_array_size(new_bus, range, new_bus->dev_count - i)); 5614 } 5615 5616 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5617 synchronize_srcu_expedited(&kvm->srcu); 5618 5619 /* 5620 * If NULL bus is installed, destroy the old bus, including all the 5621 * attached devices. Otherwise, destroy the caller's device only. 
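* In either case, the synchronize_srcu_expedited() above ensures that no
* SRCU reader still references the old bus before it, or the removed
* device, is destroyed.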
5622 */ 5623 if (!new_bus) { 5624 pr_err("kvm: failed to shrink bus, removing it completely\n"); 5625 kvm_io_bus_destroy(bus); 5626 return -ENOMEM; 5627 } 5628 5629 kvm_iodevice_destructor(dev); 5630 kfree(bus); 5631 return 0; 5632 } 5633 5634 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5635 gpa_t addr) 5636 { 5637 struct kvm_io_bus *bus; 5638 int dev_idx, srcu_idx; 5639 struct kvm_io_device *iodev = NULL; 5640 5641 srcu_idx = srcu_read_lock(&kvm->srcu); 5642 5643 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 5644 if (!bus) 5645 goto out_unlock; 5646 5647 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 5648 if (dev_idx < 0) 5649 goto out_unlock; 5650 5651 iodev = bus->range[dev_idx].dev; 5652 5653 out_unlock: 5654 srcu_read_unlock(&kvm->srcu, srcu_idx); 5655 5656 return iodev; 5657 } 5658 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 5659 5660 static int kvm_debugfs_open(struct inode *inode, struct file *file, 5661 int (*get)(void *, u64 *), int (*set)(void *, u64), 5662 const char *fmt) 5663 { 5664 int ret; 5665 struct kvm_stat_data *stat_data = inode->i_private; 5666 5667 /* 5668 * The debugfs files are a reference to the kvm struct which 5669 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe 5670 * avoids the race between open and the removal of the debugfs directory. 5671 */ 5672 if (!kvm_get_kvm_safe(stat_data->kvm)) 5673 return -ENOENT; 5674 5675 ret = simple_attr_open(inode, file, get, 5676 kvm_stats_debugfs_mode(stat_data->desc) & 0222 5677 ? set : NULL, fmt); 5678 if (ret) 5679 kvm_put_kvm(stat_data->kvm); 5680 5681 return ret; 5682 } 5683 5684 static int kvm_debugfs_release(struct inode *inode, struct file *file) 5685 { 5686 struct kvm_stat_data *stat_data = inode->i_private; 5687 5688 simple_attr_release(inode, file); 5689 kvm_put_kvm(stat_data->kvm); 5690 5691 return 0; 5692 } 5693 5694 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 5695 { 5696 *val = *(u64 *)((void *)(&kvm->stat) + offset); 5697 5698 return 0; 5699 } 5700 5701 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 5702 { 5703 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 5704 5705 return 0; 5706 } 5707 5708 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 5709 { 5710 unsigned long i; 5711 struct kvm_vcpu *vcpu; 5712 5713 *val = 0; 5714 5715 kvm_for_each_vcpu(i, vcpu, kvm) 5716 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 5717 5718 return 0; 5719 } 5720 5721 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5722 { 5723 unsigned long i; 5724 struct kvm_vcpu *vcpu; 5725 5726 kvm_for_each_vcpu(i, vcpu, kvm) 5727 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5728 5729 return 0; 5730 } 5731 5732 static int kvm_stat_data_get(void *data, u64 *val) 5733 { 5734 int r = -EFAULT; 5735 struct kvm_stat_data *stat_data = data; 5736 5737 switch (stat_data->kind) { 5738 case KVM_STAT_VM: 5739 r = kvm_get_stat_per_vm(stat_data->kvm, 5740 stat_data->desc->desc.offset, val); 5741 break; 5742 case KVM_STAT_VCPU: 5743 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5744 stat_data->desc->desc.offset, val); 5745 break; 5746 } 5747 5748 return r; 5749 } 5750 5751 static int kvm_stat_data_clear(void *data, u64 val) 5752 { 5753 int r = -EFAULT; 5754 struct kvm_stat_data *stat_data = data; 5755 5756 if (val) 5757 return -EINVAL; 5758 5759 switch (stat_data->kind) { 5760 case KVM_STAT_VM: 5761 r = kvm_clear_stat_per_vm(stat_data->kvm, 5762 stat_data->desc->desc.offset); 5763 break; 5764 case KVM_STAT_VCPU: 
5765 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5766 stat_data->desc->desc.offset); 5767 break; 5768 } 5769 5770 return r; 5771 } 5772 5773 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5774 { 5775 __simple_attr_check_format("%llu\n", 0ull); 5776 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5777 kvm_stat_data_clear, "%llu\n"); 5778 } 5779 5780 static const struct file_operations stat_fops_per_vm = { 5781 .owner = THIS_MODULE, 5782 .open = kvm_stat_data_open, 5783 .release = kvm_debugfs_release, 5784 .read = simple_attr_read, 5785 .write = simple_attr_write, 5786 .llseek = no_llseek, 5787 }; 5788 5789 static int vm_stat_get(void *_offset, u64 *val) 5790 { 5791 unsigned offset = (long)_offset; 5792 struct kvm *kvm; 5793 u64 tmp_val; 5794 5795 *val = 0; 5796 mutex_lock(&kvm_lock); 5797 list_for_each_entry(kvm, &vm_list, vm_list) { 5798 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5799 *val += tmp_val; 5800 } 5801 mutex_unlock(&kvm_lock); 5802 return 0; 5803 } 5804 5805 static int vm_stat_clear(void *_offset, u64 val) 5806 { 5807 unsigned offset = (long)_offset; 5808 struct kvm *kvm; 5809 5810 if (val) 5811 return -EINVAL; 5812 5813 mutex_lock(&kvm_lock); 5814 list_for_each_entry(kvm, &vm_list, vm_list) { 5815 kvm_clear_stat_per_vm(kvm, offset); 5816 } 5817 mutex_unlock(&kvm_lock); 5818 5819 return 0; 5820 } 5821 5822 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5823 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5824 5825 static int vcpu_stat_get(void *_offset, u64 *val) 5826 { 5827 unsigned offset = (long)_offset; 5828 struct kvm *kvm; 5829 u64 tmp_val; 5830 5831 *val = 0; 5832 mutex_lock(&kvm_lock); 5833 list_for_each_entry(kvm, &vm_list, vm_list) { 5834 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5835 *val += tmp_val; 5836 } 5837 mutex_unlock(&kvm_lock); 5838 return 0; 5839 } 5840 5841 static int vcpu_stat_clear(void *_offset, u64 val) 5842 { 5843 unsigned offset = (long)_offset; 5844 struct kvm *kvm; 5845 5846 if (val) 5847 return -EINVAL; 5848 5849 mutex_lock(&kvm_lock); 5850 list_for_each_entry(kvm, &vm_list, vm_list) { 5851 kvm_clear_stat_per_vcpu(kvm, offset); 5852 } 5853 mutex_unlock(&kvm_lock); 5854 5855 return 0; 5856 } 5857 5858 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5859 "%llu\n"); 5860 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5861 5862 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5863 { 5864 struct kobj_uevent_env *env; 5865 unsigned long long created, active; 5866 5867 if (!kvm_dev.this_device || !kvm) 5868 return; 5869 5870 mutex_lock(&kvm_lock); 5871 if (type == KVM_EVENT_CREATE_VM) { 5872 kvm_createvm_count++; 5873 kvm_active_vms++; 5874 } else if (type == KVM_EVENT_DESTROY_VM) { 5875 kvm_active_vms--; 5876 } 5877 created = kvm_createvm_count; 5878 active = kvm_active_vms; 5879 mutex_unlock(&kvm_lock); 5880 5881 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5882 if (!env) 5883 return; 5884 5885 add_uevent_var(env, "CREATED=%llu", created); 5886 add_uevent_var(env, "COUNT=%llu", active); 5887 5888 if (type == KVM_EVENT_CREATE_VM) { 5889 add_uevent_var(env, "EVENT=create"); 5890 kvm->userspace_pid = task_pid_nr(current); 5891 } else if (type == KVM_EVENT_DESTROY_VM) { 5892 add_uevent_var(env, "EVENT=destroy"); 5893 } 5894 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5895 5896 if (!IS_ERR(kvm->debugfs_dentry)) { 5897 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 
5898 5899 if (p) { 5900 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5901 if (!IS_ERR(tmp)) 5902 add_uevent_var(env, "STATS_PATH=%s", tmp); 5903 kfree(p); 5904 } 5905 } 5906 /* no need for checks, since we are adding at most only 5 keys */ 5907 env->envp[env->envp_idx++] = NULL; 5908 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5909 kfree(env); 5910 } 5911 5912 static void kvm_init_debug(void) 5913 { 5914 const struct file_operations *fops; 5915 const struct _kvm_stats_desc *pdesc; 5916 int i; 5917 5918 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5919 5920 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5921 pdesc = &kvm_vm_stats_desc[i]; 5922 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5923 fops = &vm_stat_fops; 5924 else 5925 fops = &vm_stat_readonly_fops; 5926 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5927 kvm_debugfs_dir, 5928 (void *)(long)pdesc->desc.offset, fops); 5929 } 5930 5931 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5932 pdesc = &kvm_vcpu_stats_desc[i]; 5933 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5934 fops = &vcpu_stat_fops; 5935 else 5936 fops = &vcpu_stat_readonly_fops; 5937 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5938 kvm_debugfs_dir, 5939 (void *)(long)pdesc->desc.offset, fops); 5940 } 5941 } 5942 5943 static inline 5944 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5945 { 5946 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5947 } 5948 5949 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5950 { 5951 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5952 5953 WRITE_ONCE(vcpu->preempted, false); 5954 WRITE_ONCE(vcpu->ready, false); 5955 5956 __this_cpu_write(kvm_running_vcpu, vcpu); 5957 kvm_arch_sched_in(vcpu, cpu); 5958 kvm_arch_vcpu_load(vcpu, cpu); 5959 } 5960 5961 static void kvm_sched_out(struct preempt_notifier *pn, 5962 struct task_struct *next) 5963 { 5964 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5965 5966 if (current->on_rq) { 5967 WRITE_ONCE(vcpu->preempted, true); 5968 WRITE_ONCE(vcpu->ready, true); 5969 } 5970 kvm_arch_vcpu_put(vcpu); 5971 __this_cpu_write(kvm_running_vcpu, NULL); 5972 } 5973 5974 /** 5975 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5976 * 5977 * We can disable preemption locally around accessing the per-CPU variable, 5978 * and use the resolved vcpu pointer after enabling preemption again, 5979 * because even if the current thread is migrated to another CPU, reading 5980 * the per-CPU value later will give us the same value as we update the 5981 * per-CPU variable in the preempt notifier handlers. 5982 */ 5983 struct kvm_vcpu *kvm_get_running_vcpu(void) 5984 { 5985 struct kvm_vcpu *vcpu; 5986 5987 preempt_disable(); 5988 vcpu = __this_cpu_read(kvm_running_vcpu); 5989 preempt_enable(); 5990 5991 return vcpu; 5992 } 5993 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5994 5995 /** 5996 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 
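*
* Unlike kvm_get_running_vcpu(), this returns the address of the per-CPU
* variable itself, so callers must apply per-CPU accessors to the result.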
5997 */ 5998 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5999 { 6000 return &kvm_running_vcpu; 6001 } 6002 6003 #ifdef CONFIG_GUEST_PERF_EVENTS 6004 static unsigned int kvm_guest_state(void) 6005 { 6006 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 6007 unsigned int state; 6008 6009 if (!kvm_arch_pmi_in_guest(vcpu)) 6010 return 0; 6011 6012 state = PERF_GUEST_ACTIVE; 6013 if (!kvm_arch_vcpu_in_kernel(vcpu)) 6014 state |= PERF_GUEST_USER; 6015 6016 return state; 6017 } 6018 6019 static unsigned long kvm_guest_get_ip(void) 6020 { 6021 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 6022 6023 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */ 6024 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu))) 6025 return 0; 6026 6027 return kvm_arch_vcpu_get_ip(vcpu); 6028 } 6029 6030 static struct perf_guest_info_callbacks kvm_guest_cbs = { 6031 .state = kvm_guest_state, 6032 .get_ip = kvm_guest_get_ip, 6033 .handle_intel_pt_intr = NULL, 6034 }; 6035 6036 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void)) 6037 { 6038 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler; 6039 perf_register_guest_info_callbacks(&kvm_guest_cbs); 6040 } 6041 void kvm_unregister_perf_callbacks(void) 6042 { 6043 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 6044 } 6045 #endif 6046 6047 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module) 6048 { 6049 int r; 6050 int cpu; 6051 6052 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 6053 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online", 6054 kvm_online_cpu, kvm_offline_cpu); 6055 if (r) 6056 return r; 6057 6058 register_syscore_ops(&kvm_syscore_ops); 6059 #endif 6060 6061 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 6062 if (!vcpu_align) 6063 vcpu_align = __alignof__(struct kvm_vcpu); 6064 kvm_vcpu_cache = 6065 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 6066 SLAB_ACCOUNT, 6067 offsetof(struct kvm_vcpu, arch), 6068 offsetofend(struct kvm_vcpu, stats_id) 6069 - offsetof(struct kvm_vcpu, arch), 6070 NULL); 6071 if (!kvm_vcpu_cache) { 6072 r = -ENOMEM; 6073 goto err_vcpu_cache; 6074 } 6075 6076 for_each_possible_cpu(cpu) { 6077 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu), 6078 GFP_KERNEL, cpu_to_node(cpu))) { 6079 r = -ENOMEM; 6080 goto err_cpu_kick_mask; 6081 } 6082 } 6083 6084 r = kvm_irqfd_init(); 6085 if (r) 6086 goto err_irqfd; 6087 6088 r = kvm_async_pf_init(); 6089 if (r) 6090 goto err_async_pf; 6091 6092 kvm_chardev_ops.owner = module; 6093 kvm_vm_fops.owner = module; 6094 kvm_vcpu_fops.owner = module; 6095 kvm_device_fops.owner = module; 6096 6097 kvm_preempt_ops.sched_in = kvm_sched_in; 6098 kvm_preempt_ops.sched_out = kvm_sched_out; 6099 6100 kvm_init_debug(); 6101 6102 r = kvm_vfio_ops_init(); 6103 if (WARN_ON_ONCE(r)) 6104 goto err_vfio; 6105 6106 /* 6107 * Registration _must_ be the very last thing done, as this exposes 6108 * /dev/kvm to userspace, i.e. all infrastructure must be setup! 
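* The error labels below unwind in the exact reverse order of the setup
* above, so a failure at any step leaves no stale state behind.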

int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
	int r;
	int cpu;

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
				      kvm_online_cpu, kvm_offline_cpu);
	if (r)
		return r;

	register_syscore_ops(&kvm_syscore_ops);
#endif

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache =
		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
					   SLAB_ACCOUNT,
					   offsetof(struct kvm_vcpu, arch),
					   offsetofend(struct kvm_vcpu, stats_id)
					   - offsetof(struct kvm_vcpu, arch),
					   NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto err_vcpu_cache;
	}

	for_each_possible_cpu(cpu) {
		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu))) {
			r = -ENOMEM;
			goto err_cpu_kick_mask;
		}
	}

	r = kvm_irqfd_init();
	if (r)
		goto err_irqfd;

	r = kvm_async_pf_init();
	if (r)
		goto err_async_pf;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;
	kvm_device_fops.owner = module;

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	r = kvm_vfio_ops_init();
	if (WARN_ON_ONCE(r))
		goto err_vfio;

	/*
	 * Registration _must_ be the very last thing done, as this exposes
	 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
	 */
	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto err_register;
	}

	return 0;

err_register:
	kvm_vfio_ops_exit();
err_vfio:
	kvm_async_pf_deinit();
err_async_pf:
	kvm_irqfd_exit();
err_irqfd:
err_cpu_kick_mask:
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
err_vcpu_cache:
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
	unregister_syscore_ops(&kvm_syscore_ops);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	int cpu;

	/*
	 * Note, unregistering /dev/kvm doesn't strictly need to come first;
	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
	 * to KVM while the module is being stopped.
	 */
	misc_deregister(&kvm_dev);

	debugfs_remove_recursive(kvm_debugfs_dir);
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_vfio_ops_exit();
	kvm_async_pf_deinit();
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
	unregister_syscore_ops(&kvm_syscore_ops);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
#endif
	kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
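
/*
 * Illustrative sketch only (compiled out, not part of KVM): the typical
 * shape of an architecture module driving kvm_init()/kvm_exit().  The
 * arch_example_*() helpers are hypothetical stand-ins for the arch's own
 * hardware checks and state.  kvm_init() comes last because it creates
 * /dev/kvm; passing 0 for vcpu_align selects __alignof__(struct kvm_vcpu),
 * per the code above.
 */
#if 0
static int __init arch_example_module_init(void)
{
	int r;

	r = arch_example_setup();
	if (r)
		return r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		arch_example_teardown();
	return r;
}
module_init(arch_example_module_init);

static void __exit arch_example_module_exit(void)
{
	kvm_exit();
	arch_example_teardown();
}
module_exit(arch_example_module_exit);
#endif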

struct kvm_vm_worker_thread_context {
	struct kvm *kvm;
	struct task_struct *parent;
	struct completion init_done;
	kvm_vm_thread_fn_t thread_fn;
	uintptr_t data;
	int err;
};

static int kvm_vm_worker_thread(void *context)
{
	/*
	 * The init_context is allocated on the stack of the parent thread, so
	 * we have to locally copy anything that is needed beyond initialization.
	 */
	struct kvm_vm_worker_thread_context *init_context = context;
	struct task_struct *parent;
	struct kvm *kvm = init_context->kvm;
	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
	uintptr_t data = init_context->data;
	int err;

	err = kthread_park(current);
	/* kthread_park(current) is never supposed to return an error */
	WARN_ON(err != 0);
	if (err)
		goto init_complete;

	err = cgroup_attach_task_all(init_context->parent, current);
	if (err) {
		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
			__func__, err);
		goto init_complete;
	}

	set_user_nice(current, task_nice(init_context->parent));

init_complete:
	init_context->err = err;
	complete(&init_context->init_done);
	init_context = NULL;

	if (err)
		goto out;

	/* Wait to be woken up by the spawner before proceeding. */
	kthread_parkme();

	if (!kthread_should_stop())
		err = thread_fn(kvm, data);

out:
	/*
	 * Move the kthread back to its original cgroup to prevent it from
	 * lingering in the cgroup of the VM process after the latter finishes
	 * its execution.
	 *
	 * kthread_stop() waits on the 'exited' completion condition which is
	 * set in exit_mm(), via mm_release(), in do_exit().  However, the
	 * kthread is removed from the cgroup in cgroup_exit(), which is
	 * called after exit_mm().  This causes kthread_stop() to return
	 * before the kthread actually quits the cgroup.
	 */
	rcu_read_lock();
	parent = rcu_dereference(current->real_parent);
	get_task_struct(parent);
	rcu_read_unlock();
	cgroup_attach_task_all(parent, current);
	put_task_struct(parent);

	return err;
}

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr)
{
	struct kvm_vm_worker_thread_context init_context = {};
	struct task_struct *thread;

	*thread_ptr = NULL;
	init_context.kvm = kvm;
	init_context.parent = current;
	init_context.thread_fn = thread_fn;
	init_context.data = data;
	init_completion(&init_context.init_done);

	thread = kthread_run(kvm_vm_worker_thread, &init_context,
			     "%s-%d", name, task_pid_nr(current));
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	/* kthread_run is never supposed to return NULL */
	WARN_ON(thread == NULL);

	wait_for_completion(&init_context.init_done);

	if (!init_context.err)
		*thread_ptr = thread;

	return init_context.err;
}
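
/*
 * Illustrative sketch only (compiled out, not part of KVM): a hypothetical
 * caller of kvm_vm_create_worker_thread().  The worker parks itself once its
 * init is complete, so the spawner must kthread_unpark() it before thread_fn
 * runs, and kthread_stop() on the returned task ends the worker (compare
 * x86's NX hugepage recovery thread, which follows this pattern).
 */
#if 0
static int example_worker_fn(struct kvm *kvm, uintptr_t data)
{
	/* Typically a loop that exits once the spawner calls kthread_stop(). */
	while (!kthread_should_stop())
		schedule_timeout_idle(HZ);
	return 0;
}

static int example_spawn_worker(struct kvm *kvm, struct task_struct **out)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, example_worker_fn, 0,
					  "kvm-example", out);
	if (err)
		return err;

	/* The worker is parked after init; unpark it to start thread_fn. */
	kthread_unpark(*out);
	return 0;
}
#endif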