// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine (KVM) Hypervisor
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>


/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default halves per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Allow direct access (from KVM or the CPU) without MMU notifier protection
 * to unpinned pages.
 */
static bool allow_unsafe_mappings;
module_param(allow_unsafe_mappings, bool, 0444);
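/*
 * Note: module parameters registered with mode 0644 (the halt_poll_ns knobs
 * above) are writable at runtime by root via /sys/module/kvm/parameters/,
 * whereas allow_unsafe_mappings (0444) can only be set at module load time.
 */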
/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

static struct dentry *kvm_debugfs_dir;

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}
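/*
 * Request/kick flow implemented by the helpers below: __kvm_make_request()
 * sets the request bit, kvm_vcpu_wake_up() handles a vCPU that is blocked in
 * halt, and kvm_kick_many_cpus() sends IPIs so that vCPUs still running in
 * guest mode exit and notice the request before re-entering the guest.
 */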
static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{
	int cpu;

	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates.  Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized.  See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_make_vcpu_request(vcpu, req, cpus, me);

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlbs(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
		return;

	/*
	 * Fall back to flushing the entire TLB if the architecture's
	 * range-based TLB invalidation is unsupported or can't be performed
	 * for whatever reason.
	 */
	kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock.  The interaction between the various operations on the
	 * memslot must be serialized by slots_lock to ensure the TLB flush
	 * from one operation is observed by any other operation on the same
	 * memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}

static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	void *page;

	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);

	page = (void *)__get_free_page(gfp_flags);
	if (page && mc->init_value)
		memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
	return page;
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;

	if (unlikely(!mc->objects)) {
		if (WARN_ON_ONCE(!capacity))
			return -EIO;

		/*
		 * Custom init values can be used only for page allocations,
		 * and obviously conflict with __GFP_ZERO.
		 */
		if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
			return -EIO;

		mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
		if (!mc->objects)
			return -ENOMEM;

		mc->capacity = capacity;
	}

	/* It is illegal to request a different capacity across topups. */
	if (WARN_ON_ONCE(mc->capacity != capacity))
		return -EIO;

	while (mc->nobjs < mc->capacity) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}
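/*
 * Typical usage of the cache (a sketch; the cache field name is illustrative,
 * arch code picks its own): top up the cache where sleeping is allowed, then
 * draw objects from it with kvm_mmu_memory_cache_alloc() under mmu_lock,
 * where the GFP_ATOMIC path below is only an emergency fallback.
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, min);
 *	if (r)
 *		return r;
 *	write_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	...
 *	write_unlock(&kvm->mmu_lock);
 */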
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}

	kvfree(mc->objects);

	mc->objects = NULL;
	mc->capacity = 0;
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rwlock_init(&vcpu->pid_lock);
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = NULL;

	/* Fill the stats id string for the vcpu */
	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
		 task_pid_nr(current), id);
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_destroy(vcpu);
	kvm_dirty_ring_free(&vcpu->dirty_ring);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(vcpu->pid);

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
		xa_erase(&kvm->vcpu_array, i);

		/*
		 * Assert that the vCPU isn't visible in any way, to ensure KVM
		 * doesn't trigger a use-after-free if destroying vCPUs results
		 * in a VM-wide request, e.g. to flush remote TLBs when tearing
		 * down MMUs, or to mark the VM dead if a KVM_BUG_ON() fires.
		 */
		WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i));
	}

	atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm);

struct kvm_mmu_notifier_range {
	/*
	 * 64-bit addresses, as KVM notifiers can operate on host virtual
	 * addresses (unsigned long) and guest physical addresses (64-bit).
	 */
	u64 start;
	u64 end;
	union kvm_mmu_notifier_arg arg;
	gfn_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
	bool lockless;
};

/*
 * The inner-most helper returns a tuple containing the return value from the
 * arch- and action-specific handler, plus a flag indicating whether or not at
 * least one memslot was found, i.e. if the handler found guest memory.
 *
 * Note, most notifiers are averse to booleans, so even though KVM tracks the
 * return from arch code as a bool, outer helpers will cast it to an int. :-(
 */
typedef struct kvm_mmu_notifier_return {
	bool ret;
	bool found_memslot;
} kvm_mn_ret_t;

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))	     \

static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
							  const struct kvm_mmu_notifier_range *range)
{
	struct kvm_mmu_notifier_return r = {
		.ret = false,
		.found_memslot = false,
	};
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return r;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return r;

	/* on_lock will never be called for lockless walks */
	if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
		return r;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
			hva_end = min_t(unsigned long, range->end,
					slot->userspace_addr + (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.arg = range->arg;
			gfn_range.may_block = range->may_block;
			/*
			 * HVA-based notifications aren't relevant to private
			 * mappings as they don't have a userspace mapping.
			 */
			gfn_range.attr_filter = KVM_FILTER_SHARED;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;
			gfn_range.lockless = range->lockless;

			if (!r.found_memslot) {
				r.found_memslot = true;
				if (!range->lockless) {
					KVM_MMU_LOCK(kvm);
					if (!IS_KVM_NULL_FN(range->on_lock))
						range->on_lock(kvm);

					if (IS_KVM_NULL_FN(range->handler))
						goto mmu_unlock;
				}
			}
			r.ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && r.ret)
		kvm_flush_remote_tlbs(kvm);

mmu_unlock:
	if (r.found_memslot && !range->lockless)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
					     unsigned long start,
					     unsigned long end,
					     gfn_handler_t handler,
					     bool flush_on_ret)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= flush_on_ret,
		.may_block	= false,
		.lockless	= IS_ENABLED(CONFIG_KVM_MMU_LOCKLESS_AGING),
	};

	return kvm_handle_hva_range(kvm, &range).ret;
}

static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
						       unsigned long start,
						       unsigned long end,
						       gfn_handler_t handler)
{
	return kvm_age_hva_range(mn, start, end, handler, false);
}

void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
	lockdep_assert_held_write(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_invalidate_in_progress++;

	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
		kvm->mmu_invalidate_range_start = INVALID_GPA;
		kvm->mmu_invalidate_range_end = INVALID_GPA;
	}
}

void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);

	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}

bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
	return kvm_unmap_gfn_range(kvm, range);
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= kvm_mmu_unmap_gfn_range,
		.on_lock	= kvm_mmu_invalidate_begin,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
	 * each cache's lock.  There are relatively few caches in existence at
	 * any given time, and the caches themselves can check for hva overlap,
	 * i.e. don't need to rely on memslot overlap checks for performance.
	 * Because this runs without holding mmu_lock, the pfn caches must use
	 * mn_active_invalidate_count (see above) instead of
	 * mmu_invalidate_in_progress.
	 */
	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);

	/*
	 * If one or more memslots were found and thus zapped, notify arch code
	 * that guest memory has been reclaimed.  This needs to be done *after*
	 * dropping mmu_lock, as x86's reclaim path is slooooow.
	 */
	if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
		kvm_arch_guest_memory_reclaimed(kvm);

	return 0;
}

void kvm_mmu_invalidate_end(struct kvm *kvm)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_invalidate_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
	 */
	kvm->mmu_invalidate_in_progress--;
	KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);

	/*
	 * Assert that at least one range was added between start() and end().
	 * Not adding a range isn't fatal, but it is a KVM bug.
	 */
	WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
}
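/*
 * Page fault paths pair with the begin/end bookkeeping above via
 * mmu_invalidate_seq (see mmu_invalidate_retry() and friends in kvm_host.h):
 * a fault handler snapshots the sequence count before resolving the host
 * page, then rechecks it under mmu_lock before installing a mapping, and
 * retries the fault if an invalidation started or completed in between.
 */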
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_mmu_invalidate_end,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
		--kvm->mn_active_invalidate_count;
	wake = !kvm->mn_active_invalidate_count;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
				 !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead.  If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence.  If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_age_hva_range_no_flush(mn, address, address + 1,
					  kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else  /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	vfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	if (slot->flags & KVM_MEM_GUEST_MEMFD)
		kvm_gmem_unbind(slot);

	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct hlist_node *idnode;
	struct kvm_memory_slot *memslot;
	int bkt;

	/*
	 * The same memslot objects live in both active and inactive sets,
	 * arbitrarily free using index '1' so the second invocation of this
	 * function isn't operating over a structure with dangling pointers
	 * (even though this function isn't actually touching them).
	 */
	if (!slots->node_idx)
		return;

	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
		kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}


static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (IS_ERR(kvm->debugfs_dentry))
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
	static DEFINE_MUTEX(kvm_debugfs_lock);
	struct dentry *dent;
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i, ret = -ENOMEM;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
	mutex_lock(&kvm_debugfs_lock);
	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
	if (dent) {
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
		dput(dent);
		mutex_unlock(&kvm_debugfs_lock);
		return 0;
	}
	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	mutex_unlock(&kvm_debugfs_lock);
	if (IS_ERR(dent))
		return 0;

	kvm->debugfs_dentry = dent;
	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		goto out_err;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	kvm_arch_create_vm_debugfs(kvm);
	return 0;
out_err:
	kvm_destroy_vm_debugfs(kvm);
	return ret;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after per-vm debugfs created.
 * When called, kvm->debugfs_dentry should already be set up, so we can create
 * arch-specific debugfs entries under it.  Cleanup is done automatically and
 * recursively in kvm_destroy_vm_debugfs(), so a per-arch destroy interface is
 * not needed.
 */
void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
}

static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r, i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	xa_init(&kvm->mem_attr_array);
#endif

	INIT_LIST_HEAD(&kvm->gpc_list);
	spin_lock_init(&kvm->gpc_lock);

	INIT_LIST_HEAD(&kvm->devices);
	kvm->max_vcpus = KVM_MAX_VCPUS;

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	/*
	 * Force subsequent debugfs file creations to fail if the VM directory
	 * is not created (by kvm_create_vm_debugfs()).
	 */
	kvm->debugfs_dentry = ERR_PTR(-ENOENT);

	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
		 task_pid_nr(current));

	r = -ENOMEM;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	r = kvm_init_irq_routing(kvm);
	if (r)
		goto out_err_no_irq_routing;

	refcount_set(&kvm->users_count, 1);

	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space. */
			slots->generation = i;
		}

		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
	}

	r = -ENOMEM;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = kvm_enable_virtualization();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0)
		goto out_no_coalesced_mmio;

	r = kvm_create_vm_debugfs(kvm, fdname);
	if (r)
		goto out_err_no_debugfs;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err_no_debugfs:
	kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	kvm_disable_virtualization();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	kvm_free_irq_routing(kvm);
out_err_no_irq_routing:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 *
	 * The device list is generally managed as an rculist, but list_del()
	 * is used intentionally here.  If a bug in KVM introduced a reader that
	 * was not backed by a reference on the kvm struct, the hope is that
	 * it'd consume the poisoned forward pointer instead of suffering a
	 * use-after-free, even though this cannot be guaranteed.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in kvm_swap_active_memslots() as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 *
	 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
	 * notifier between a start() and end(), then there shouldn't be any
	 * in-progress invalidations.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	if (kvm->mn_active_invalidate_count)
		kvm->mn_active_invalidate_count = 0;
	else
		WARN_ON(kvm->mmu_invalidate_in_progress);
#else
	kvm_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	xa_destroy(&kvm->mem_attr_array);
#endif
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	kvm_disable_virtualization();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the VM is not under destruction; this is a safe version of
 * kvm_get_kvm().  Returns true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
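/*
 * Sketch of the pattern described above (caller and helper names are
 * illustrative, not taken from this excerpt):
 *
 *	kvm_get_kvm(kvm);
 *	fd = create_vcpu_fd(vcpu);
 *	if (fd < 0) {
 *		kvm_put_kvm_no_destroy(kvm);
 *		return fd;
 *	}
 */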
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

int kvm_trylock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
			goto out_unlock;
	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}
EXPORT_SYMBOL_GPL(kvm_trylock_all_vcpus);

int kvm_lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	int r;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
		if (r)
			goto out_unlock;
	}
	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return r;
}
EXPORT_SYMBOL_GPL(kvm_lock_all_vcpus);

void kvm_unlock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		mutex_unlock(&vcpu->mutex);
}
EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus);

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
				  struct kvm_memory_slot *b)
{
	if (WARN_ON_ONCE(!a && !b))
		return 0;

	if (!a)
		return b->as_id;
	if (!b)
		return a->as_id;

	WARN_ON_ONCE(a->as_id != b->as_id);
	return a->as_id;
}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
				struct kvm_memory_slot *slot)
{
	struct rb_root *gfn_tree = &slots->gfn_tree;
	struct rb_node **node, *parent;
	int idx = slots->node_idx;

	parent = NULL;
	for (node = &gfn_tree->rb_node; *node; ) {
		struct kvm_memory_slot *tmp;

		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
		parent = *node;
		if (slot->base_gfn < tmp->base_gfn)
			node = &(*node)->rb_left;
		else if (slot->base_gfn > tmp->base_gfn)
			node = &(*node)->rb_right;
		else
			BUG();
	}

	rb_link_node(&slot->gfn_node[idx], parent, node);
	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
			       struct kvm_memory_slot *slot)
{
	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int idx = slots->node_idx;

	WARN_ON_ONCE(old->base_gfn != new->base_gfn);

	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
			&slots->gfn_tree);
}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
	int idx = slots->node_idx;

	if (old) {
		hash_del(&old->id_node[idx]);
		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);

		if ((long)old == atomic_long_read(&slots->last_used_slot))
			atomic_long_set(&slots->last_used_slot, (long)new);

		if (!new) {
			kvm_erase_gfn_node(slots, old);
			return;
		}
	}

	/*
	 * Initialize @new's hva range.  Do this even when replacing an @old
	 * slot, kvm_copy_memslot() deliberately does not touch node data.
	 */
	new->hva_node[idx].start = new->userspace_addr;
	new->hva_node[idx].last = new->userspace_addr +
				  (new->npages << PAGE_SHIFT) - 1;

	/*
	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
	 * hva_node needs to be swapped with remove+insert even though hva can't
	 * change when replacing an existing slot.
	 */
	hash_add(slots->id_hash, &new->id_node[idx], new->id);
	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

	/*
	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
	 * switch the node in the gfn tree instead of removing the old and
	 * inserting the new as two separate operations.  Replacement is a
	 * single O(1) operation versus two O(log(n)) operations for
	 * remove+insert.
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

/*
 * Flags that do not access any of the extra space of struct
 * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
 * only allows these.
 */
#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)

static int check_memory_region_flags(struct kvm *kvm,
				     const struct kvm_userspace_memory_region2 *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

	if (kvm_arch_has_private_mem(kvm))
		valid_flags |= KVM_MEM_GUEST_MEMFD;

	/* Dirty logging private memory is not currently supported. */
	if (mem->flags & KVM_MEM_GUEST_MEMFD)
		valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	/*
	 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
	 * read-only memslots have emulated MMIO, not page fault, semantics,
	 * and KVM doesn't allow emulated MMIO for private memory.
	 */
	if (kvm_arch_has_readonly_mem(kvm) &&
	    !(mem->flags & KVM_MEM_GUEST_MEMFD))
		valid_flags |= KVM_MEM_READONLY;

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

	/* Grab the generation from the active memslots. */
	u64 gen = __kvm_memslots(kvm, as_id)->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot.  Must be released before the SRCU
	 * synchronization below in order to avoid deadlock with another thread
	 * acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += kvm_arch_nr_memslot_as_ids(kvm);

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
}

static int kvm_prepare_memory_region(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int r;

	/*
	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
	 * will be freed on "commit".  If logging is enabled in both old and
	 * new, reuse the existing bitmap.  If logging is enabled only in the
	 * new and KVM isn't using a ring buffer, allocate and initialize a
	 * new bitmap.
	 */
	if (change != KVM_MR_DELETE) {
		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
			new->dirty_bitmap = NULL;
		else if (old && old->dirty_bitmap)
			new->dirty_bitmap = old->dirty_bitmap;
		else if (kvm_use_dirty_bitmap(kvm)) {
			r = kvm_alloc_dirty_bitmap(new);
			if (r)
				return r;

			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
				bitmap_set(new->dirty_bitmap, 0, new->npages);
		}
	}

	r = kvm_arch_prepare_memory_region(kvm, old, new, change);

	/* Free the bitmap on failure if it was allocated above. */
	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
		kvm_destroy_dirty_bitmap(new);

	return r;
}

static void kvm_commit_memory_region(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	int old_flags = old ? old->flags : 0;
	int new_flags = new ? new->flags : 0;
	/*
	 * Update the total number of memslot pages before calling the arch
	 * hook so that architectures can consume the result directly.
	 */
	if (change == KVM_MR_DELETE)
		kvm->nr_memslot_pages -= old->npages;
	else if (change == KVM_MR_CREATE)
		kvm->nr_memslot_pages += new->npages;

	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
		atomic_set(&kvm->nr_memslots_dirty_logging,
			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
	}

	kvm_arch_commit_memory_region(kvm, old, new, change);

	switch (change) {
	case KVM_MR_CREATE:
		/* Nothing more to do. */
		break;
	case KVM_MR_DELETE:
		/* Free the old memslot and all its metadata. */
		kvm_free_memslot(kvm, old);
		break;
	case KVM_MR_MOVE:
	case KVM_MR_FLAGS_ONLY:
		/*
		 * Free the dirty bitmap as needed; the below check encompasses
		 * both the flags and whether a ring buffer is being used.
		 */
		if (old->dirty_bitmap && !new->dirty_bitmap)
			kvm_destroy_dirty_bitmap(old);

		/*
		 * The final quirk.  Free the detached, old slot, but only its
		 * memory, not any metadata.  Metadata, including arch specific
		 * data, may be reused by @new.
		 */
		kfree(old);
		break;
	default:
		BUG();
	}
}

/*
 * Activate @new, which must be installed in the inactive slots by the caller,
 * by swapping the active slots and then propagating @new to @old once @old is
 * unreachable and can be safely modified.
 *
 * With NULL @old this simply adds @new to @active (while swapping the sets).
 * With NULL @new this simply removes @old from @active and frees it
 * (while also swapping the sets).
 */
static void kvm_activate_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);

	kvm_swap_active_memslots(kvm, as_id);

	/* Propagate the new memslot to the now inactive memslots. */
	kvm_replace_memslot(kvm, old, new);
}

static void kvm_copy_memslot(struct kvm_memory_slot *dest,
			     const struct kvm_memory_slot *src)
{
	dest->base_gfn = src->base_gfn;
	dest->npages = src->npages;
	dest->dirty_bitmap = src->dirty_bitmap;
	dest->arch = src->arch;
	dest->userspace_addr = src->userspace_addr;
	dest->flags = src->flags;
	dest->id = src->id;
	dest->as_id = src->as_id;
}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Mark the current slot INVALID.  As with all memslot modifications,
	 * this must be done on an unreachable slot to avoid modifying the
	 * current slot in the active tree.
	 */
	kvm_copy_memslot(invalid_slot, old);
	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
	kvm_replace_memslot(kvm, old, invalid_slot);

	/*
	 * Activate the slot that is now marked INVALID, but don't propagate
	 * the slot to the now inactive slots.  The slot is either going to be
	 * deleted or recreated as a new slot.
	 */
	kvm_swap_active_memslots(kvm, old->as_id);

	/*
	 * From this point no new shadow pages pointing to a deleted, or moved,
	 * memslot will be created.  Validation of sp->gfn happens in:
	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 *	- kvm_is_visible_gfn (mmu_check_root)
	 */
	kvm_arch_flush_shadow_memslot(kvm, old);
	kvm_arch_guest_memory_reclaimed(kvm);

	/* Was released by kvm_swap_active_memslots(), reacquire. */
	mutex_lock(&kvm->slots_arch_lock);

	/*
	 * Copy the arch-specific field of the newly-installed slot back to the
	 * old slot as the arch data could have changed between releasing
	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the
	 * lock above.  Writers are required to retrieve memslots *after*
	 * acquiring slots_arch_lock, thus the active slot's data is guaranteed
	 * to be fresh.
	 */
	old->arch = invalid_slot->arch;
}

static void kvm_create_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *new)
{
	/* Add the new memslot to the inactive set and activate. */
	kvm_replace_memslot(kvm, NULL, new);
	kvm_activate_memslot(kvm, NULL, new);
}

static void kvm_delete_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *old,
			       struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Remove the old memslot (in the inactive memslots) by passing NULL as
	 * the "new" slot, and for the invalid version in the active slots.
	 */
	kvm_replace_memslot(kvm, old, NULL);
	kvm_activate_memslot(kvm, invalid_slot, NULL);
}

static void kvm_move_memslot(struct kvm *kvm,
			     struct kvm_memory_slot *old,
			     struct kvm_memory_slot *new,
			     struct kvm_memory_slot *invalid_slot)
{
	/*
	 * Replace the old memslot in the inactive slots, and then swap slots
	 * and replace the current INVALID with the new as well.
1866 */ 1867 kvm_replace_memslot(kvm, old, new); 1868 kvm_activate_memslot(kvm, invalid_slot, new); 1869 } 1870 1871 static void kvm_update_flags_memslot(struct kvm *kvm, 1872 struct kvm_memory_slot *old, 1873 struct kvm_memory_slot *new) 1874 { 1875 /* 1876 * Similar to the MOVE case, but the slot doesn't need to be zapped as 1877 * an intermediate step. Instead, the old memslot is simply replaced 1878 * with a new, updated copy in both memslot sets. 1879 */ 1880 kvm_replace_memslot(kvm, old, new); 1881 kvm_activate_memslot(kvm, old, new); 1882 } 1883 1884 static int kvm_set_memslot(struct kvm *kvm, 1885 struct kvm_memory_slot *old, 1886 struct kvm_memory_slot *new, 1887 enum kvm_mr_change change) 1888 { 1889 struct kvm_memory_slot *invalid_slot; 1890 int r; 1891 1892 /* 1893 * Released in kvm_swap_active_memslots(). 1894 * 1895 * Must be held from before the current memslots are copied until after 1896 * the new memslots are installed with rcu_assign_pointer, then 1897 * released before the synchronize srcu in kvm_swap_active_memslots(). 1898 * 1899 * When modifying memslots outside of the slots_lock, must be held 1900 * before reading the pointer to the current memslots until after all 1901 * changes to those memslots are complete. 1902 * 1903 * These rules ensure that installing new memslots does not lose 1904 * changes made to the previous memslots. 1905 */ 1906 mutex_lock(&kvm->slots_arch_lock); 1907 1908 /* 1909 * Invalidate the old slot if it's being deleted or moved. This is 1910 * done prior to actually deleting/moving the memslot to allow vCPUs to 1911 * continue running by ensuring there are no mappings or shadow pages 1912 * for the memslot when it is deleted/moved. Without pre-invalidation 1913 * (and without a lock), a window would exist between effecting the 1914 * delete/move and committing the changes in arch code where KVM or a 1915 * guest could access a non-existent memslot. 1916 * 1917 * Modifications are done on a temporary, unreachable slot. The old 1918 * slot needs to be preserved in case a later step fails and the 1919 * invalidation needs to be reverted. 1920 */ 1921 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1922 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); 1923 if (!invalid_slot) { 1924 mutex_unlock(&kvm->slots_arch_lock); 1925 return -ENOMEM; 1926 } 1927 kvm_invalidate_memslot(kvm, old, invalid_slot); 1928 } 1929 1930 r = kvm_prepare_memory_region(kvm, old, new, change); 1931 if (r) { 1932 /* 1933 * For DELETE/MOVE, revert the above INVALID change. No 1934 * modifications required since the original slot was preserved 1935 * in the inactive slots. Changing the active memslots also 1936 * release slots_arch_lock. 1937 */ 1938 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1939 kvm_activate_memslot(kvm, invalid_slot, old); 1940 kfree(invalid_slot); 1941 } else { 1942 mutex_unlock(&kvm->slots_arch_lock); 1943 } 1944 return r; 1945 } 1946 1947 /* 1948 * For DELETE and MOVE, the working slot is now active as the INVALID 1949 * version of the old slot. MOVE is particularly special as it reuses 1950 * the old slot and returns a copy of the old slot (in working_slot). 1951 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the 1952 * old slot is detached but otherwise preserved. 
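 * The dispatch below simply routes each change type to its dedicated helper;
 * any other value is a KVM bug and trips the BUG() below.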
1953 */ 1954 if (change == KVM_MR_CREATE) 1955 kvm_create_memslot(kvm, new); 1956 else if (change == KVM_MR_DELETE) 1957 kvm_delete_memslot(kvm, old, invalid_slot); 1958 else if (change == KVM_MR_MOVE) 1959 kvm_move_memslot(kvm, old, new, invalid_slot); 1960 else if (change == KVM_MR_FLAGS_ONLY) 1961 kvm_update_flags_memslot(kvm, old, new); 1962 else 1963 BUG(); 1964 1965 /* Free the temporary INVALID slot used for DELETE and MOVE. */ 1966 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) 1967 kfree(invalid_slot); 1968 1969 /* 1970 * No need to refresh new->arch, changes after dropping slots_arch_lock 1971 * will directly hit the final, active memslot. Architectures are 1972 * responsible for knowing that new->arch may be stale. 1973 */ 1974 kvm_commit_memory_region(kvm, old, new, change); 1975 1976 return 0; 1977 } 1978 1979 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, 1980 gfn_t start, gfn_t end) 1981 { 1982 struct kvm_memslot_iter iter; 1983 1984 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { 1985 if (iter.slot->id != id) 1986 return true; 1987 } 1988 1989 return false; 1990 } 1991 1992 static int kvm_set_memory_region(struct kvm *kvm, 1993 const struct kvm_userspace_memory_region2 *mem) 1994 { 1995 struct kvm_memory_slot *old, *new; 1996 struct kvm_memslots *slots; 1997 enum kvm_mr_change change; 1998 unsigned long npages; 1999 gfn_t base_gfn; 2000 int as_id, id; 2001 int r; 2002 2003 lockdep_assert_held(&kvm->slots_lock); 2004 2005 r = check_memory_region_flags(kvm, mem); 2006 if (r) 2007 return r; 2008 2009 as_id = mem->slot >> 16; 2010 id = (u16)mem->slot; 2011 2012 /* General sanity checks */ 2013 if ((mem->memory_size & (PAGE_SIZE - 1)) || 2014 (mem->memory_size != (unsigned long)mem->memory_size)) 2015 return -EINVAL; 2016 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 2017 return -EINVAL; 2018 /* We can read the guest memory with __xxx_user() later on. */ 2019 if ((mem->userspace_addr & (PAGE_SIZE - 1)) || 2020 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) || 2021 !access_ok((void __user *)(unsigned long)mem->userspace_addr, 2022 mem->memory_size)) 2023 return -EINVAL; 2024 if (mem->flags & KVM_MEM_GUEST_MEMFD && 2025 (mem->guest_memfd_offset & (PAGE_SIZE - 1) || 2026 mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset)) 2027 return -EINVAL; 2028 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM) 2029 return -EINVAL; 2030 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 2031 return -EINVAL; 2032 2033 /* 2034 * The size of userspace-defined memory regions is restricted in order 2035 * to play nice with dirty bitmap operations, which are indexed with an 2036 * "unsigned int". KVM's internal memory regions don't support dirty 2037 * logging, and so are exempt. 2038 */ 2039 if (id < KVM_USER_MEM_SLOTS && 2040 (mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) 2041 return -EINVAL; 2042 2043 slots = __kvm_memslots(kvm, as_id); 2044 2045 /* 2046 * Note, the old memslot (and the pointer itself!) may be invalidated 2047 * and/or destroyed by kvm_set_memslot(). 
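 * In other words, @old must not be dereferenced after kvm_set_memslot()
 * returns.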
2048 */ 2049 old = id_to_memslot(slots, id); 2050 2051 if (!mem->memory_size) { 2052 if (!old || !old->npages) 2053 return -EINVAL; 2054 2055 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) 2056 return -EIO; 2057 2058 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); 2059 } 2060 2061 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); 2062 npages = (mem->memory_size >> PAGE_SHIFT); 2063 2064 if (!old || !old->npages) { 2065 change = KVM_MR_CREATE; 2066 2067 /* 2068 * To simplify KVM internals, the total number of pages across 2069 * all memslots must fit in an unsigned long. 2070 */ 2071 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) 2072 return -EINVAL; 2073 } else { /* Modify an existing slot. */ 2074 /* Private memslots are immutable, they can only be deleted. */ 2075 if (mem->flags & KVM_MEM_GUEST_MEMFD) 2076 return -EINVAL; 2077 if ((mem->userspace_addr != old->userspace_addr) || 2078 (npages != old->npages) || 2079 ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) 2080 return -EINVAL; 2081 2082 if (base_gfn != old->base_gfn) 2083 change = KVM_MR_MOVE; 2084 else if (mem->flags != old->flags) 2085 change = KVM_MR_FLAGS_ONLY; 2086 else /* Nothing to change. */ 2087 return 0; 2088 } 2089 2090 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && 2091 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) 2092 return -EEXIST; 2093 2094 /* Allocate a slot that will persist in the memslot. */ 2095 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); 2096 if (!new) 2097 return -ENOMEM; 2098 2099 new->as_id = as_id; 2100 new->id = id; 2101 new->base_gfn = base_gfn; 2102 new->npages = npages; 2103 new->flags = mem->flags; 2104 new->userspace_addr = mem->userspace_addr; 2105 if (mem->flags & KVM_MEM_GUEST_MEMFD) { 2106 r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset); 2107 if (r) 2108 goto out; 2109 } 2110 2111 r = kvm_set_memslot(kvm, old, new, change); 2112 if (r) 2113 goto out_unbind; 2114 2115 return 0; 2116 2117 out_unbind: 2118 if (mem->flags & KVM_MEM_GUEST_MEMFD) 2119 kvm_gmem_unbind(new); 2120 out: 2121 kfree(new); 2122 return r; 2123 } 2124 2125 int kvm_set_internal_memslot(struct kvm *kvm, 2126 const struct kvm_userspace_memory_region2 *mem) 2127 { 2128 if (WARN_ON_ONCE(mem->slot < KVM_USER_MEM_SLOTS)) 2129 return -EINVAL; 2130 2131 if (WARN_ON_ONCE(mem->flags)) 2132 return -EINVAL; 2133 2134 return kvm_set_memory_region(kvm, mem); 2135 } 2136 EXPORT_SYMBOL_GPL(kvm_set_internal_memslot); 2137 2138 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 2139 struct kvm_userspace_memory_region2 *mem) 2140 { 2141 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 2142 return -EINVAL; 2143 2144 guard(mutex)(&kvm->slots_lock); 2145 return kvm_set_memory_region(kvm, mem); 2146 } 2147 2148 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 2149 /** 2150 * kvm_get_dirty_log - get a snapshot of dirty pages 2151 * @kvm: pointer to kvm instance 2152 * @log: slot id and address to which we copy the log 2153 * @is_dirty: set to '1' if any dirty pages were found 2154 * @memslot: set to the associated memslot, always valid on success 2155 */ 2156 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, 2157 int *is_dirty, struct kvm_memory_slot **memslot) 2158 { 2159 struct kvm_memslots *slots; 2160 int i, as_id, id; 2161 unsigned long n; 2162 unsigned long any = 0; 2163 2164 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2165 if (!kvm_use_dirty_bitmap(kvm)) 2166 return -ENXIO; 2167 2168 *memslot = NULL; 2169 *is_dirty = 0; 
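	/* log->slot packs the address space id (high 16 bits) and the slot id. */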
2170
2171 	as_id = log->slot >> 16;
2172 	id = (u16)log->slot;
2173 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2174 		return -EINVAL;
2175
2176 	slots = __kvm_memslots(kvm, as_id);
2177 	*memslot = id_to_memslot(slots, id);
2178 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
2179 		return -ENOENT;
2180
2181 	kvm_arch_sync_dirty_log(kvm, *memslot);
2182
2183 	n = kvm_dirty_bitmap_bytes(*memslot);
2184
2185 	for (i = 0; !any && i < n/sizeof(long); ++i)
2186 		any = (*memslot)->dirty_bitmap[i];
2187
2188 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2189 		return -EFAULT;
2190
2191 	if (any)
2192 		*is_dirty = 1;
2193 	return 0;
2194 }
2195 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2196
2197 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2198 /**
2199  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2200  *	and reenable dirty page tracking for the corresponding pages.
2201  * @kvm:	pointer to kvm instance
2202  * @log:	slot id and address to which we copy the log
2203  *
2204  * We need to keep in mind that VCPU threads can write to the bitmap
2205  * concurrently. So, to avoid losing track of dirty pages we keep the
2206  * following order:
2207  *
2208  *    1. Take a snapshot of the bit and clear it if needed.
2209  *    2. Write protect the corresponding page.
2210  *    3. Copy the snapshot to the userspace.
2211  *    4. Upon return, the caller flushes TLBs if needed.
2212  *
2213  * Between 2 and 4, the guest may write to the page using the remaining TLB
2214  * entry.  This is not a problem because the page is reported dirty using
2215  * the snapshot taken before and step 4 ensures that writes done after
2216  * exiting to userspace will be logged for the next call.
2217  *
2218  */
2219 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2220 {
2221 	struct kvm_memslots *slots;
2222 	struct kvm_memory_slot *memslot;
2223 	int i, as_id, id;
2224 	unsigned long n;
2225 	unsigned long *dirty_bitmap;
2226 	unsigned long *dirty_bitmap_buffer;
2227 	bool flush;
2228
2229 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2230 	if (!kvm_use_dirty_bitmap(kvm))
2231 		return -ENXIO;
2232
2233 	as_id = log->slot >> 16;
2234 	id = (u16)log->slot;
2235 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2236 		return -EINVAL;
2237
2238 	slots = __kvm_memslots(kvm, as_id);
2239 	memslot = id_to_memslot(slots, id);
2240 	if (!memslot || !memslot->dirty_bitmap)
2241 		return -ENOENT;
2242
2243 	dirty_bitmap = memslot->dirty_bitmap;
2244
2245 	kvm_arch_sync_dirty_log(kvm, memslot);
2246
2247 	n = kvm_dirty_bitmap_bytes(memslot);
2248 	flush = false;
2249 	if (kvm->manual_dirty_log_protect) {
2250 		/*
2251 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2252 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2253 		 * is some code duplication between this function and
2254 		 * kvm_get_dirty_log, but hopefully all architectures will
2255 		 * transition to kvm_get_dirty_log_protect so that
2256 		 * kvm_get_dirty_log can be eliminated.
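		 * With manual protect, the bitmap is handed to userspace as-is;
		 * bits are cleared and pages are write-protected again only when
		 * userspace issues KVM_CLEAR_DIRTY_LOG.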
2257 */ 2258 dirty_bitmap_buffer = dirty_bitmap; 2259 } else { 2260 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2261 memset(dirty_bitmap_buffer, 0, n); 2262 2263 KVM_MMU_LOCK(kvm); 2264 for (i = 0; i < n / sizeof(long); i++) { 2265 unsigned long mask; 2266 gfn_t offset; 2267 2268 if (!dirty_bitmap[i]) 2269 continue; 2270 2271 flush = true; 2272 mask = xchg(&dirty_bitmap[i], 0); 2273 dirty_bitmap_buffer[i] = mask; 2274 2275 offset = i * BITS_PER_LONG; 2276 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2277 offset, mask); 2278 } 2279 KVM_MMU_UNLOCK(kvm); 2280 } 2281 2282 if (flush) 2283 kvm_flush_remote_tlbs_memslot(kvm, memslot); 2284 2285 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) 2286 return -EFAULT; 2287 return 0; 2288 } 2289 2290 2291 /** 2292 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot 2293 * @kvm: kvm instance 2294 * @log: slot id and address to which we copy the log 2295 * 2296 * Steps 1-4 below provide general overview of dirty page logging. See 2297 * kvm_get_dirty_log_protect() function description for additional details. 2298 * 2299 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we 2300 * always flush the TLB (step 4) even if previous step failed and the dirty 2301 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API 2302 * does not preclude user space subsequent dirty log read. Flushing TLB ensures 2303 * writes will be marked dirty for next log read. 2304 * 2305 * 1. Take a snapshot of the bit and clear it if needed. 2306 * 2. Write protect the corresponding page. 2307 * 3. Copy the snapshot to the userspace. 2308 * 4. Flush TLB's if needed. 2309 */ 2310 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2311 struct kvm_dirty_log *log) 2312 { 2313 int r; 2314 2315 mutex_lock(&kvm->slots_lock); 2316 2317 r = kvm_get_dirty_log_protect(kvm, log); 2318 2319 mutex_unlock(&kvm->slots_lock); 2320 return r; 2321 } 2322 2323 /** 2324 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap 2325 * and reenable dirty page tracking for the corresponding pages. 
2326 * @kvm: pointer to kvm instance 2327 * @log: slot id and address from which to fetch the bitmap of dirty pages 2328 */ 2329 static int kvm_clear_dirty_log_protect(struct kvm *kvm, 2330 struct kvm_clear_dirty_log *log) 2331 { 2332 struct kvm_memslots *slots; 2333 struct kvm_memory_slot *memslot; 2334 int as_id, id; 2335 gfn_t offset; 2336 unsigned long i, n; 2337 unsigned long *dirty_bitmap; 2338 unsigned long *dirty_bitmap_buffer; 2339 bool flush; 2340 2341 /* Dirty ring tracking may be exclusive to dirty log tracking */ 2342 if (!kvm_use_dirty_bitmap(kvm)) 2343 return -ENXIO; 2344 2345 as_id = log->slot >> 16; 2346 id = (u16)log->slot; 2347 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS) 2348 return -EINVAL; 2349 2350 if (log->first_page & 63) 2351 return -EINVAL; 2352 2353 slots = __kvm_memslots(kvm, as_id); 2354 memslot = id_to_memslot(slots, id); 2355 if (!memslot || !memslot->dirty_bitmap) 2356 return -ENOENT; 2357 2358 dirty_bitmap = memslot->dirty_bitmap; 2359 2360 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; 2361 2362 if (log->first_page > memslot->npages || 2363 log->num_pages > memslot->npages - log->first_page || 2364 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) 2365 return -EINVAL; 2366 2367 kvm_arch_sync_dirty_log(kvm, memslot); 2368 2369 flush = false; 2370 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 2371 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 2372 return -EFAULT; 2373 2374 KVM_MMU_LOCK(kvm); 2375 for (offset = log->first_page, i = offset / BITS_PER_LONG, 2376 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; 2377 i++, offset += BITS_PER_LONG) { 2378 unsigned long mask = *dirty_bitmap_buffer++; 2379 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i]; 2380 if (!mask) 2381 continue; 2382 2383 mask &= atomic_long_fetch_andnot(mask, p); 2384 2385 /* 2386 * mask contains the bits that really have been cleared. This 2387 * never includes any bits beyond the length of the memslot (if 2388 * the length is not aligned to 64 pages), therefore it is not 2389 * a problem if userspace sets them in log->dirty_bitmap. 2390 */ 2391 if (mask) { 2392 flush = true; 2393 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 2394 offset, mask); 2395 } 2396 } 2397 KVM_MMU_UNLOCK(kvm); 2398 2399 if (flush) 2400 kvm_flush_remote_tlbs_memslot(kvm, memslot); 2401 2402 return 0; 2403 } 2404 2405 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 2406 struct kvm_clear_dirty_log *log) 2407 { 2408 int r; 2409 2410 mutex_lock(&kvm->slots_lock); 2411 2412 r = kvm_clear_dirty_log_protect(kvm, log); 2413 2414 mutex_unlock(&kvm->slots_lock); 2415 return r; 2416 } 2417 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 2418 2419 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES 2420 static u64 kvm_supported_mem_attributes(struct kvm *kvm) 2421 { 2422 if (!kvm || kvm_arch_has_private_mem(kvm)) 2423 return KVM_MEMORY_ATTRIBUTE_PRIVATE; 2424 2425 return 0; 2426 } 2427 2428 /* 2429 * Returns true if _all_ gfns in the range [@start, @end) have attributes 2430 * such that the bits in @mask match @attrs. 
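 * Attribute bits requested in @attrs that fall outside of @mask (after
 * filtering out unsupported attributes) cause the check to fail.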
2431 */ 2432 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, 2433 unsigned long mask, unsigned long attrs) 2434 { 2435 XA_STATE(xas, &kvm->mem_attr_array, start); 2436 unsigned long index; 2437 void *entry; 2438 2439 mask &= kvm_supported_mem_attributes(kvm); 2440 if (attrs & ~mask) 2441 return false; 2442 2443 if (end == start + 1) 2444 return (kvm_get_memory_attributes(kvm, start) & mask) == attrs; 2445 2446 guard(rcu)(); 2447 if (!attrs) 2448 return !xas_find(&xas, end - 1); 2449 2450 for (index = start; index < end; index++) { 2451 do { 2452 entry = xas_next(&xas); 2453 } while (xas_retry(&xas, entry)); 2454 2455 if (xas.xa_index != index || 2456 (xa_to_value(entry) & mask) != attrs) 2457 return false; 2458 } 2459 2460 return true; 2461 } 2462 2463 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm, 2464 struct kvm_mmu_notifier_range *range) 2465 { 2466 struct kvm_gfn_range gfn_range; 2467 struct kvm_memory_slot *slot; 2468 struct kvm_memslots *slots; 2469 struct kvm_memslot_iter iter; 2470 bool found_memslot = false; 2471 bool ret = false; 2472 int i; 2473 2474 gfn_range.arg = range->arg; 2475 gfn_range.may_block = range->may_block; 2476 2477 /* 2478 * If/when KVM supports more attributes beyond private .vs shared, this 2479 * _could_ set KVM_FILTER_{SHARED,PRIVATE} appropriately if the entire target 2480 * range already has the desired private vs. shared state (it's unclear 2481 * if that is a net win). For now, KVM reaches this point if and only 2482 * if the private flag is being toggled, i.e. all mappings are in play. 2483 */ 2484 2485 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { 2486 slots = __kvm_memslots(kvm, i); 2487 2488 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) { 2489 slot = iter.slot; 2490 gfn_range.slot = slot; 2491 2492 gfn_range.start = max(range->start, slot->base_gfn); 2493 gfn_range.end = min(range->end, slot->base_gfn + slot->npages); 2494 if (gfn_range.start >= gfn_range.end) 2495 continue; 2496 2497 if (!found_memslot) { 2498 found_memslot = true; 2499 KVM_MMU_LOCK(kvm); 2500 if (!IS_KVM_NULL_FN(range->on_lock)) 2501 range->on_lock(kvm); 2502 } 2503 2504 ret |= range->handler(kvm, &gfn_range); 2505 } 2506 } 2507 2508 if (range->flush_on_ret && ret) 2509 kvm_flush_remote_tlbs(kvm); 2510 2511 if (found_memslot) 2512 KVM_MMU_UNLOCK(kvm); 2513 } 2514 2515 static bool kvm_pre_set_memory_attributes(struct kvm *kvm, 2516 struct kvm_gfn_range *range) 2517 { 2518 /* 2519 * Unconditionally add the range to the invalidation set, regardless of 2520 * whether or not the arch callback actually needs to zap SPTEs. E.g. 2521 * if KVM supports RWX attributes in the future and the attributes are 2522 * going from R=>RW, zapping isn't strictly necessary. Unconditionally 2523 * adding the range allows KVM to require that MMU invalidations add at 2524 * least one range between begin() and end(), e.g. allows KVM to detect 2525 * bugs where the add() is missed. Relaxing the rule *might* be safe, 2526 * but it's not obvious that allowing new mappings while the attributes 2527 * are in flux is desirable or worth the complexity. 2528 */ 2529 kvm_mmu_invalidate_range_add(kvm, range->start, range->end); 2530 2531 return kvm_arch_pre_set_memory_attributes(kvm, range); 2532 } 2533 2534 /* Set @attributes for the gfn range [@start, @end). 
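 * The xarray entries are reserved up front so the update cannot fail partway
 * through, and the arch pre-/post-set callbacks bracket the change so that
 * stale mappings are invalidated.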
*/ 2535 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, 2536 unsigned long attributes) 2537 { 2538 struct kvm_mmu_notifier_range pre_set_range = { 2539 .start = start, 2540 .end = end, 2541 .arg.attributes = attributes, 2542 .handler = kvm_pre_set_memory_attributes, 2543 .on_lock = kvm_mmu_invalidate_begin, 2544 .flush_on_ret = true, 2545 .may_block = true, 2546 }; 2547 struct kvm_mmu_notifier_range post_set_range = { 2548 .start = start, 2549 .end = end, 2550 .arg.attributes = attributes, 2551 .handler = kvm_arch_post_set_memory_attributes, 2552 .on_lock = kvm_mmu_invalidate_end, 2553 .may_block = true, 2554 }; 2555 unsigned long i; 2556 void *entry; 2557 int r = 0; 2558 2559 entry = attributes ? xa_mk_value(attributes) : NULL; 2560 2561 mutex_lock(&kvm->slots_lock); 2562 2563 /* Nothing to do if the entire range as the desired attributes. */ 2564 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes)) 2565 goto out_unlock; 2566 2567 /* 2568 * Reserve memory ahead of time to avoid having to deal with failures 2569 * partway through setting the new attributes. 2570 */ 2571 for (i = start; i < end; i++) { 2572 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT); 2573 if (r) 2574 goto out_unlock; 2575 2576 cond_resched(); 2577 } 2578 2579 kvm_handle_gfn_range(kvm, &pre_set_range); 2580 2581 for (i = start; i < end; i++) { 2582 r = xa_err(xa_store(&kvm->mem_attr_array, i, entry, 2583 GFP_KERNEL_ACCOUNT)); 2584 KVM_BUG_ON(r, kvm); 2585 cond_resched(); 2586 } 2587 2588 kvm_handle_gfn_range(kvm, &post_set_range); 2589 2590 out_unlock: 2591 mutex_unlock(&kvm->slots_lock); 2592 2593 return r; 2594 } 2595 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm, 2596 struct kvm_memory_attributes *attrs) 2597 { 2598 gfn_t start, end; 2599 2600 /* flags is currently not used. */ 2601 if (attrs->flags) 2602 return -EINVAL; 2603 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm)) 2604 return -EINVAL; 2605 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address) 2606 return -EINVAL; 2607 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size)) 2608 return -EINVAL; 2609 2610 start = attrs->address >> PAGE_SHIFT; 2611 end = (attrs->address + attrs->size) >> PAGE_SHIFT; 2612 2613 /* 2614 * xarray tracks data using "unsigned long", and as a result so does 2615 * KVM. For simplicity, supports generic attributes only on 64-bit 2616 * architectures. 2617 */ 2618 BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long)); 2619 2620 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes); 2621 } 2622 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */ 2623 2624 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 2625 { 2626 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 2627 } 2628 EXPORT_SYMBOL_GPL(gfn_to_memslot); 2629 2630 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 2631 { 2632 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); 2633 u64 gen = slots->generation; 2634 struct kvm_memory_slot *slot; 2635 2636 /* 2637 * This also protects against using a memslot from a different address space, 2638 * since different address spaces have different generation numbers. 2639 */ 2640 if (unlikely(gen != vcpu->last_used_slot_gen)) { 2641 vcpu->last_used_slot = NULL; 2642 vcpu->last_used_slot_gen = gen; 2643 } 2644 2645 slot = try_get_memslot(vcpu->last_used_slot, gfn); 2646 if (slot) 2647 return slot; 2648 2649 /* 2650 * Fall back to searching all memslots. 
We purposely use 2651 * search_memslots() instead of __gfn_to_memslot() to avoid 2652 * thrashing the VM-wide last_used_slot in kvm_memslots. 2653 */ 2654 slot = search_memslots(slots, gfn, false); 2655 if (slot) { 2656 vcpu->last_used_slot = slot; 2657 return slot; 2658 } 2659 2660 return NULL; 2661 } 2662 2663 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 2664 { 2665 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 2666 2667 return kvm_is_visible_memslot(memslot); 2668 } 2669 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 2670 2671 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2672 { 2673 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2674 2675 return kvm_is_visible_memslot(memslot); 2676 } 2677 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 2678 2679 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 2680 { 2681 struct vm_area_struct *vma; 2682 unsigned long addr, size; 2683 2684 size = PAGE_SIZE; 2685 2686 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 2687 if (kvm_is_error_hva(addr)) 2688 return PAGE_SIZE; 2689 2690 mmap_read_lock(current->mm); 2691 vma = find_vma(current->mm, addr); 2692 if (!vma) 2693 goto out; 2694 2695 size = vma_kernel_pagesize(vma); 2696 2697 out: 2698 mmap_read_unlock(current->mm); 2699 2700 return size; 2701 } 2702 2703 static bool memslot_is_readonly(const struct kvm_memory_slot *slot) 2704 { 2705 return slot->flags & KVM_MEM_READONLY; 2706 } 2707 2708 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, 2709 gfn_t *nr_pages, bool write) 2710 { 2711 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2712 return KVM_HVA_ERR_BAD; 2713 2714 if (memslot_is_readonly(slot) && write) 2715 return KVM_HVA_ERR_RO_BAD; 2716 2717 if (nr_pages) 2718 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2719 2720 return __gfn_to_hva_memslot(slot, gfn); 2721 } 2722 2723 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2724 gfn_t *nr_pages) 2725 { 2726 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2727 } 2728 2729 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2730 gfn_t gfn) 2731 { 2732 return gfn_to_hva_many(slot, gfn, NULL); 2733 } 2734 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2735 2736 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2737 { 2738 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2739 } 2740 EXPORT_SYMBOL_GPL(gfn_to_hva); 2741 2742 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2743 { 2744 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2745 } 2746 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2747 2748 /* 2749 * Return the hva of a @gfn and the R/W attribute if possible. 
2750 * 2751 * @slot: the kvm_memory_slot which contains @gfn 2752 * @gfn: the gfn to be translated 2753 * @writable: used to return the read/write attribute of the @slot if the hva 2754 * is valid and @writable is not NULL 2755 */ 2756 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2757 gfn_t gfn, bool *writable) 2758 { 2759 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2760 2761 if (!kvm_is_error_hva(hva) && writable) 2762 *writable = !memslot_is_readonly(slot); 2763 2764 return hva; 2765 } 2766 2767 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2768 { 2769 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2770 2771 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2772 } 2773 2774 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2775 { 2776 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2777 2778 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2779 } 2780 2781 static bool kvm_is_ad_tracked_page(struct page *page) 2782 { 2783 /* 2784 * Per page-flags.h, pages tagged PG_reserved "should in general not be 2785 * touched (e.g. set dirty) except by its owner". 2786 */ 2787 return !PageReserved(page); 2788 } 2789 2790 static void kvm_set_page_dirty(struct page *page) 2791 { 2792 if (kvm_is_ad_tracked_page(page)) 2793 SetPageDirty(page); 2794 } 2795 2796 static void kvm_set_page_accessed(struct page *page) 2797 { 2798 if (kvm_is_ad_tracked_page(page)) 2799 mark_page_accessed(page); 2800 } 2801 2802 void kvm_release_page_clean(struct page *page) 2803 { 2804 if (!page) 2805 return; 2806 2807 kvm_set_page_accessed(page); 2808 put_page(page); 2809 } 2810 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2811 2812 void kvm_release_page_dirty(struct page *page) 2813 { 2814 if (!page) 2815 return; 2816 2817 kvm_set_page_dirty(page); 2818 kvm_release_page_clean(page); 2819 } 2820 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2821 2822 static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page, 2823 struct follow_pfnmap_args *map, bool writable) 2824 { 2825 kvm_pfn_t pfn; 2826 2827 WARN_ON_ONCE(!!page == !!map); 2828 2829 if (kfp->map_writable) 2830 *kfp->map_writable = writable; 2831 2832 if (map) 2833 pfn = map->pfn; 2834 else 2835 pfn = page_to_pfn(page); 2836 2837 *kfp->refcounted_page = page; 2838 2839 return pfn; 2840 } 2841 2842 /* 2843 * The fast path to get the writable pfn which will be stored in @pfn, 2844 * true indicates success, otherwise false is returned. 2845 */ 2846 static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn) 2847 { 2848 struct page *page; 2849 bool r; 2850 2851 /* 2852 * Try the fast-only path when the caller wants to pin/get the page for 2853 * writing. If the caller only wants to read the page, KVM must go 2854 * down the full, slow path in order to avoid racing an operation that 2855 * breaks Copy-on-Write (CoW), e.g. so that KVM doesn't end up pointing 2856 * at the old, read-only page while mm/ points at a new, writable page. 
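 * Consequently, the fast path is only attempted when the caller wants a
 * writable mapping (FOLL_WRITE or a non-NULL @map_writable).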
2857  */
2858 	if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
2859 		return false;
2860
2861 	if (kfp->pin)
2862 		r = pin_user_pages_fast(kfp->hva, 1, FOLL_WRITE, &page) == 1;
2863 	else
2864 		r = get_user_page_fast_only(kfp->hva, FOLL_WRITE, &page);
2865
2866 	if (r) {
2867 		*pfn = kvm_resolve_pfn(kfp, page, NULL, true);
2868 		return true;
2869 	}
2870
2871 	return false;
2872 }
2873
2874 /*
2875  * The slow path to get the pfn of the specified host virtual address;
2876  * 1 indicates success, -errno is returned if an error is detected.
2877  */
2878 static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2879 {
2880 	/*
2881 	 * When a VCPU accesses a page that is not mapped into the secondary
2882 	 * MMU, we look up the page using GUP to map it, so the guest VCPU can
2883 	 * make progress. We always want to honor NUMA hinting faults in that
2884 	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2885 	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2886 	 * mapped into the secondary MMU and gets accessed by a VCPU.
2887 	 *
2888 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2889 	 * implicitly honor NUMA hinting faults and don't need this flag.
2890 	 */
2891 	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
2892 	struct page *page, *wpage;
2893 	int npages;
2894
2895 	if (kfp->pin)
2896 		npages = pin_user_pages_unlocked(kfp->hva, 1, &page, flags);
2897 	else
2898 		npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
2899 	if (npages != 1)
2900 		return npages;
2901
2902 	/*
2903 	 * Pinning is mutually exclusive with opportunistically mapping a read
2904 	 * fault as writable, as KVM should never pin pages when mapping memory
2905 	 * into the guest (pinning is only for direct accesses from KVM).
2906 	 */
2907 	if (WARN_ON_ONCE(kfp->map_writable && kfp->pin))
2908 		goto out;
2909
2910 	/* Map a read fault as writable if possible. */
2911 	if (!(flags & FOLL_WRITE) && kfp->map_writable &&
2912 	    get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
2913 		put_page(page);
2914 		page = wpage;
2915 		flags |= FOLL_WRITE;
2916 	}
2917
2918 out:
2919 	*pfn = kvm_resolve_pfn(kfp, page, NULL, flags & FOLL_WRITE);
2920 	return npages;
2921 }
2922
2923 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2924 {
2925 	if (unlikely(!(vma->vm_flags & VM_READ)))
2926 		return false;
2927
2928 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2929 		return false;
2930
2931 	return true;
2932 }
2933
2934 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2935 			       struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
2936 {
2937 	struct follow_pfnmap_args args = { .vma = vma, .address = kfp->hva };
2938 	bool write_fault = kfp->flags & FOLL_WRITE;
2939 	int r;
2940
2941 	/*
2942 	 * Remapped memory cannot be pinned in any meaningful sense.  Bail if
2943 	 * the caller wants to pin the page, i.e. access the page outside of
2944 	 * MMU notifier protection, and unsafe mappings are disallowed.
2945 	 */
2946 	if (kfp->pin && !allow_unsafe_mappings)
2947 		return -EINVAL;
2948
2949 	r = follow_pfnmap_start(&args);
2950 	if (r) {
2951 		/*
2952 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2953 		 * not call the fault handler, so do it here.
2954 		 */
2955 		bool unlocked = false;
2956 		r = fixup_user_fault(current->mm, kfp->hva,
2957 				     (write_fault ?
FAULT_FLAG_WRITE : 0), 2958 &unlocked); 2959 if (unlocked) 2960 return -EAGAIN; 2961 if (r) 2962 return r; 2963 2964 r = follow_pfnmap_start(&args); 2965 if (r) 2966 return r; 2967 } 2968 2969 if (write_fault && !args.writable) { 2970 *p_pfn = KVM_PFN_ERR_RO_FAULT; 2971 goto out; 2972 } 2973 2974 *p_pfn = kvm_resolve_pfn(kfp, NULL, &args, args.writable); 2975 out: 2976 follow_pfnmap_end(&args); 2977 return r; 2978 } 2979 2980 kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp) 2981 { 2982 struct vm_area_struct *vma; 2983 kvm_pfn_t pfn; 2984 int npages, r; 2985 2986 might_sleep(); 2987 2988 if (WARN_ON_ONCE(!kfp->refcounted_page)) 2989 return KVM_PFN_ERR_FAULT; 2990 2991 if (hva_to_pfn_fast(kfp, &pfn)) 2992 return pfn; 2993 2994 npages = hva_to_pfn_slow(kfp, &pfn); 2995 if (npages == 1) 2996 return pfn; 2997 if (npages == -EINTR || npages == -EAGAIN) 2998 return KVM_PFN_ERR_SIGPENDING; 2999 if (npages == -EHWPOISON) 3000 return KVM_PFN_ERR_HWPOISON; 3001 3002 mmap_read_lock(current->mm); 3003 retry: 3004 vma = vma_lookup(current->mm, kfp->hva); 3005 3006 if (vma == NULL) 3007 pfn = KVM_PFN_ERR_FAULT; 3008 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { 3009 r = hva_to_pfn_remapped(vma, kfp, &pfn); 3010 if (r == -EAGAIN) 3011 goto retry; 3012 if (r < 0) 3013 pfn = KVM_PFN_ERR_FAULT; 3014 } else { 3015 if ((kfp->flags & FOLL_NOWAIT) && 3016 vma_is_valid(vma, kfp->flags & FOLL_WRITE)) 3017 pfn = KVM_PFN_ERR_NEEDS_IO; 3018 else 3019 pfn = KVM_PFN_ERR_FAULT; 3020 } 3021 mmap_read_unlock(current->mm); 3022 return pfn; 3023 } 3024 3025 static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp) 3026 { 3027 kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL, 3028 kfp->flags & FOLL_WRITE); 3029 3030 if (kfp->hva == KVM_HVA_ERR_RO_BAD) 3031 return KVM_PFN_ERR_RO_FAULT; 3032 3033 if (kvm_is_error_hva(kfp->hva)) 3034 return KVM_PFN_NOSLOT; 3035 3036 if (memslot_is_readonly(kfp->slot) && kfp->map_writable) { 3037 *kfp->map_writable = false; 3038 kfp->map_writable = NULL; 3039 } 3040 3041 return hva_to_pfn(kfp); 3042 } 3043 3044 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn, 3045 unsigned int foll, bool *writable, 3046 struct page **refcounted_page) 3047 { 3048 struct kvm_follow_pfn kfp = { 3049 .slot = slot, 3050 .gfn = gfn, 3051 .flags = foll, 3052 .map_writable = writable, 3053 .refcounted_page = refcounted_page, 3054 }; 3055 3056 if (WARN_ON_ONCE(!writable || !refcounted_page)) 3057 return KVM_PFN_ERR_FAULT; 3058 3059 *writable = false; 3060 *refcounted_page = NULL; 3061 3062 return kvm_follow_pfn(&kfp); 3063 } 3064 EXPORT_SYMBOL_GPL(__kvm_faultin_pfn); 3065 3066 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, 3067 struct page **pages, int nr_pages) 3068 { 3069 unsigned long addr; 3070 gfn_t entry = 0; 3071 3072 addr = gfn_to_hva_many(slot, gfn, &entry); 3073 if (kvm_is_error_hva(addr)) 3074 return -1; 3075 3076 if (entry < nr_pages) 3077 return 0; 3078 3079 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 3080 } 3081 EXPORT_SYMBOL_GPL(kvm_prefetch_pages); 3082 3083 /* 3084 * Don't use this API unless you are absolutely, positively certain that KVM 3085 * needs to get a struct page, e.g. to pin the page for firmware DMA. 3086 * 3087 * FIXME: Users of this API likely need to FOLL_PIN the page, not just elevate 3088 * its refcount. 
3089 */ 3090 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write) 3091 { 3092 struct page *refcounted_page = NULL; 3093 struct kvm_follow_pfn kfp = { 3094 .slot = gfn_to_memslot(kvm, gfn), 3095 .gfn = gfn, 3096 .flags = write ? FOLL_WRITE : 0, 3097 .refcounted_page = &refcounted_page, 3098 }; 3099 3100 (void)kvm_follow_pfn(&kfp); 3101 return refcounted_page; 3102 } 3103 EXPORT_SYMBOL_GPL(__gfn_to_page); 3104 3105 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, 3106 bool writable) 3107 { 3108 struct kvm_follow_pfn kfp = { 3109 .slot = gfn_to_memslot(vcpu->kvm, gfn), 3110 .gfn = gfn, 3111 .flags = writable ? FOLL_WRITE : 0, 3112 .refcounted_page = &map->pinned_page, 3113 .pin = true, 3114 }; 3115 3116 map->pinned_page = NULL; 3117 map->page = NULL; 3118 map->hva = NULL; 3119 map->gfn = gfn; 3120 map->writable = writable; 3121 3122 map->pfn = kvm_follow_pfn(&kfp); 3123 if (is_error_noslot_pfn(map->pfn)) 3124 return -EINVAL; 3125 3126 if (pfn_valid(map->pfn)) { 3127 map->page = pfn_to_page(map->pfn); 3128 map->hva = kmap(map->page); 3129 #ifdef CONFIG_HAS_IOMEM 3130 } else { 3131 map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB); 3132 #endif 3133 } 3134 3135 return map->hva ? 0 : -EFAULT; 3136 } 3137 EXPORT_SYMBOL_GPL(__kvm_vcpu_map); 3138 3139 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map) 3140 { 3141 if (!map->hva) 3142 return; 3143 3144 if (map->page) 3145 kunmap(map->page); 3146 #ifdef CONFIG_HAS_IOMEM 3147 else 3148 memunmap(map->hva); 3149 #endif 3150 3151 if (map->writable) 3152 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); 3153 3154 if (map->pinned_page) { 3155 if (map->writable) 3156 kvm_set_page_dirty(map->pinned_page); 3157 kvm_set_page_accessed(map->pinned_page); 3158 unpin_user_page(map->pinned_page); 3159 } 3160 3161 map->hva = NULL; 3162 map->page = NULL; 3163 map->pinned_page = NULL; 3164 } 3165 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 3166 3167 static int next_segment(unsigned long len, int offset) 3168 { 3169 if (len > PAGE_SIZE - offset) 3170 return PAGE_SIZE - offset; 3171 else 3172 return len; 3173 } 3174 3175 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */ 3176 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 3177 void *data, int offset, int len) 3178 { 3179 int r; 3180 unsigned long addr; 3181 3182 if (WARN_ON_ONCE(offset + len > PAGE_SIZE)) 3183 return -EFAULT; 3184 3185 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3186 if (kvm_is_error_hva(addr)) 3187 return -EFAULT; 3188 r = __copy_from_user(data, (void __user *)addr + offset, len); 3189 if (r) 3190 return -EFAULT; 3191 return 0; 3192 } 3193 3194 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 3195 int len) 3196 { 3197 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3198 3199 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3200 } 3201 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 3202 3203 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 3204 int offset, int len) 3205 { 3206 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3207 3208 return __kvm_read_guest_page(slot, gfn, data, offset, len); 3209 } 3210 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 3211 3212 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 3213 { 3214 gfn_t gfn = gpa >> PAGE_SHIFT; 3215 int seg; 3216 int offset = offset_in_page(gpa); 3217 int ret; 3218 3219 while ((seg = next_segment(len, offset)) != 0) 
{ 3220 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 3221 if (ret < 0) 3222 return ret; 3223 offset = 0; 3224 len -= seg; 3225 data += seg; 3226 ++gfn; 3227 } 3228 return 0; 3229 } 3230 EXPORT_SYMBOL_GPL(kvm_read_guest); 3231 3232 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 3233 { 3234 gfn_t gfn = gpa >> PAGE_SHIFT; 3235 int seg; 3236 int offset = offset_in_page(gpa); 3237 int ret; 3238 3239 while ((seg = next_segment(len, offset)) != 0) { 3240 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 3241 if (ret < 0) 3242 return ret; 3243 offset = 0; 3244 len -= seg; 3245 data += seg; 3246 ++gfn; 3247 } 3248 return 0; 3249 } 3250 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 3251 3252 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 3253 void *data, int offset, unsigned long len) 3254 { 3255 int r; 3256 unsigned long addr; 3257 3258 if (WARN_ON_ONCE(offset + len > PAGE_SIZE)) 3259 return -EFAULT; 3260 3261 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 3262 if (kvm_is_error_hva(addr)) 3263 return -EFAULT; 3264 pagefault_disable(); 3265 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 3266 pagefault_enable(); 3267 if (r) 3268 return -EFAULT; 3269 return 0; 3270 } 3271 3272 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 3273 void *data, unsigned long len) 3274 { 3275 gfn_t gfn = gpa >> PAGE_SHIFT; 3276 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3277 int offset = offset_in_page(gpa); 3278 3279 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 3280 } 3281 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 3282 3283 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */ 3284 static int __kvm_write_guest_page(struct kvm *kvm, 3285 struct kvm_memory_slot *memslot, gfn_t gfn, 3286 const void *data, int offset, int len) 3287 { 3288 int r; 3289 unsigned long addr; 3290 3291 if (WARN_ON_ONCE(offset + len > PAGE_SIZE)) 3292 return -EFAULT; 3293 3294 addr = gfn_to_hva_memslot(memslot, gfn); 3295 if (kvm_is_error_hva(addr)) 3296 return -EFAULT; 3297 r = __copy_to_user((void __user *)addr + offset, data, len); 3298 if (r) 3299 return -EFAULT; 3300 mark_page_dirty_in_slot(kvm, memslot, gfn); 3301 return 0; 3302 } 3303 3304 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 3305 const void *data, int offset, int len) 3306 { 3307 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 3308 3309 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 3310 } 3311 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 3312 3313 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 3314 const void *data, int offset, int len) 3315 { 3316 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3317 3318 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 3319 } 3320 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 3321 3322 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 3323 unsigned long len) 3324 { 3325 gfn_t gfn = gpa >> PAGE_SHIFT; 3326 int seg; 3327 int offset = offset_in_page(gpa); 3328 int ret; 3329 3330 while ((seg = next_segment(len, offset)) != 0) { 3331 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 3332 if (ret < 0) 3333 return ret; 3334 offset = 0; 3335 len -= seg; 3336 data += seg; 3337 ++gfn; 3338 } 3339 return 0; 3340 } 3341 EXPORT_SYMBOL_GPL(kvm_write_guest); 3342 3343 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 
3344 unsigned long len) 3345 { 3346 gfn_t gfn = gpa >> PAGE_SHIFT; 3347 int seg; 3348 int offset = offset_in_page(gpa); 3349 int ret; 3350 3351 while ((seg = next_segment(len, offset)) != 0) { 3352 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 3353 if (ret < 0) 3354 return ret; 3355 offset = 0; 3356 len -= seg; 3357 data += seg; 3358 ++gfn; 3359 } 3360 return 0; 3361 } 3362 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 3363 3364 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 3365 struct gfn_to_hva_cache *ghc, 3366 gpa_t gpa, unsigned long len) 3367 { 3368 int offset = offset_in_page(gpa); 3369 gfn_t start_gfn = gpa >> PAGE_SHIFT; 3370 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 3371 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 3372 gfn_t nr_pages_avail; 3373 3374 /* Update ghc->generation before performing any error checks. */ 3375 ghc->generation = slots->generation; 3376 3377 if (start_gfn > end_gfn) { 3378 ghc->hva = KVM_HVA_ERR_BAD; 3379 return -EINVAL; 3380 } 3381 3382 /* 3383 * If the requested region crosses two memslots, we still 3384 * verify that the entire region is valid here. 3385 */ 3386 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 3387 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 3388 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 3389 &nr_pages_avail); 3390 if (kvm_is_error_hva(ghc->hva)) 3391 return -EFAULT; 3392 } 3393 3394 /* Use the slow path for cross page reads and writes. */ 3395 if (nr_pages_needed == 1) 3396 ghc->hva += offset; 3397 else 3398 ghc->memslot = NULL; 3399 3400 ghc->gpa = gpa; 3401 ghc->len = len; 3402 return 0; 3403 } 3404 3405 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3406 gpa_t gpa, unsigned long len) 3407 { 3408 struct kvm_memslots *slots = kvm_memslots(kvm); 3409 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len); 3410 } 3411 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 3412 3413 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3414 void *data, unsigned int offset, 3415 unsigned long len) 3416 { 3417 struct kvm_memslots *slots = kvm_memslots(kvm); 3418 int r; 3419 gpa_t gpa = ghc->gpa + offset; 3420 3421 if (WARN_ON_ONCE(len + offset > ghc->len)) 3422 return -EINVAL; 3423 3424 if (slots->generation != ghc->generation) { 3425 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 3426 return -EFAULT; 3427 } 3428 3429 if (kvm_is_error_hva(ghc->hva)) 3430 return -EFAULT; 3431 3432 if (unlikely(!ghc->memslot)) 3433 return kvm_write_guest(kvm, gpa, data, len); 3434 3435 r = __copy_to_user((void __user *)ghc->hva + offset, data, len); 3436 if (r) 3437 return -EFAULT; 3438 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); 3439 3440 return 0; 3441 } 3442 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached); 3443 3444 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3445 void *data, unsigned long len) 3446 { 3447 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); 3448 } 3449 EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 3450 3451 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3452 void *data, unsigned int offset, 3453 unsigned long len) 3454 { 3455 struct kvm_memslots *slots = kvm_memslots(kvm); 3456 int r; 3457 gpa_t gpa = ghc->gpa + offset; 3458 3459 if (WARN_ON_ONCE(len + offset > ghc->len)) 3460 return -EINVAL; 3461 3462 if (slots->generation != ghc->generation) { 3463 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) 
3464 return -EFAULT; 3465 } 3466 3467 if (kvm_is_error_hva(ghc->hva)) 3468 return -EFAULT; 3469 3470 if (unlikely(!ghc->memslot)) 3471 return kvm_read_guest(kvm, gpa, data, len); 3472 3473 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len); 3474 if (r) 3475 return -EFAULT; 3476 3477 return 0; 3478 } 3479 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached); 3480 3481 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 3482 void *data, unsigned long len) 3483 { 3484 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); 3485 } 3486 EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 3487 3488 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 3489 { 3490 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3491 gfn_t gfn = gpa >> PAGE_SHIFT; 3492 int seg; 3493 int offset = offset_in_page(gpa); 3494 int ret; 3495 3496 while ((seg = next_segment(len, offset)) != 0) { 3497 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); 3498 if (ret < 0) 3499 return ret; 3500 offset = 0; 3501 len -= seg; 3502 ++gfn; 3503 } 3504 return 0; 3505 } 3506 EXPORT_SYMBOL_GPL(kvm_clear_guest); 3507 3508 void mark_page_dirty_in_slot(struct kvm *kvm, 3509 const struct kvm_memory_slot *memslot, 3510 gfn_t gfn) 3511 { 3512 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 3513 3514 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 3515 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm)) 3516 return; 3517 3518 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm)); 3519 #endif 3520 3521 if (memslot && kvm_slot_dirty_track_enabled(memslot)) { 3522 unsigned long rel_gfn = gfn - memslot->base_gfn; 3523 u32 slot = (memslot->as_id << 16) | memslot->id; 3524 3525 if (kvm->dirty_ring_size && vcpu) 3526 kvm_dirty_ring_push(vcpu, slot, rel_gfn); 3527 else if (memslot->dirty_bitmap) 3528 set_bit_le(rel_gfn, memslot->dirty_bitmap); 3529 } 3530 } 3531 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot); 3532 3533 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 3534 { 3535 struct kvm_memory_slot *memslot; 3536 3537 memslot = gfn_to_memslot(kvm, gfn); 3538 mark_page_dirty_in_slot(kvm, memslot, gfn); 3539 } 3540 EXPORT_SYMBOL_GPL(mark_page_dirty); 3541 3542 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 3543 { 3544 struct kvm_memory_slot *memslot; 3545 3546 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 3547 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); 3548 } 3549 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 3550 3551 void kvm_sigset_activate(struct kvm_vcpu *vcpu) 3552 { 3553 if (!vcpu->sigset_active) 3554 return; 3555 3556 /* 3557 * This does a lockless modification of ->real_blocked, which is fine 3558 * because, only current can change ->real_blocked and all readers of 3559 * ->real_blocked don't care as long ->real_blocked is always a subset 3560 * of ->blocked. 
3561  */
3562 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3563 }
3564
3565 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3566 {
3567 	if (!vcpu->sigset_active)
3568 		return;
3569
3570 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3571 	sigemptyset(&current->real_blocked);
3572 }
3573
3574 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3575 {
3576 	unsigned int old, val, grow, grow_start;
3577
3578 	old = val = vcpu->halt_poll_ns;
3579 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3580 	grow = READ_ONCE(halt_poll_ns_grow);
3581 	if (!grow)
3582 		goto out;
3583
3584 	val *= grow;
3585 	if (val < grow_start)
3586 		val = grow_start;
3587
3588 	vcpu->halt_poll_ns = val;
3589 out:
3590 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3591 }
3592
3593 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3594 {
3595 	unsigned int old, val, shrink, grow_start;
3596
3597 	old = val = vcpu->halt_poll_ns;
3598 	shrink = READ_ONCE(halt_poll_ns_shrink);
3599 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3600 	if (shrink == 0)
3601 		val = 0;
3602 	else
3603 		val /= shrink;
3604
3605 	if (val < grow_start)
3606 		val = 0;
3607
3608 	vcpu->halt_poll_ns = val;
3609 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3610 }
3611
3612 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3613 {
3614 	int ret = -EINTR;
3615 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3616
3617 	if (kvm_arch_vcpu_runnable(vcpu))
3618 		goto out;
3619 	if (kvm_cpu_has_pending_timer(vcpu))
3620 		goto out;
3621 	if (signal_pending(current))
3622 		goto out;
3623 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3624 		goto out;
3625
3626 	ret = 0;
3627 out:
3628 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3629 	return ret;
3630 }
3631
3632 /*
3633  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3634  * pending.  This is mostly used when halting a vCPU, but may also be used
3635  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
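 * Returns true if the vCPU actually blocked, i.e. was scheduled out at least
 * once while waiting.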
3636 */ 3637 bool kvm_vcpu_block(struct kvm_vcpu *vcpu) 3638 { 3639 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 3640 bool waited = false; 3641 3642 vcpu->stat.generic.blocking = 1; 3643 3644 preempt_disable(); 3645 kvm_arch_vcpu_blocking(vcpu); 3646 prepare_to_rcuwait(wait); 3647 preempt_enable(); 3648 3649 for (;;) { 3650 set_current_state(TASK_INTERRUPTIBLE); 3651 3652 if (kvm_vcpu_check_block(vcpu) < 0) 3653 break; 3654 3655 waited = true; 3656 schedule(); 3657 } 3658 3659 preempt_disable(); 3660 finish_rcuwait(wait); 3661 kvm_arch_vcpu_unblocking(vcpu); 3662 preempt_enable(); 3663 3664 vcpu->stat.generic.blocking = 0; 3665 3666 return waited; 3667 } 3668 3669 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, 3670 ktime_t end, bool success) 3671 { 3672 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; 3673 u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); 3674 3675 ++vcpu->stat.generic.halt_attempted_poll; 3676 3677 if (success) { 3678 ++vcpu->stat.generic.halt_successful_poll; 3679 3680 if (!vcpu_valid_wakeup(vcpu)) 3681 ++vcpu->stat.generic.halt_poll_invalid; 3682 3683 stats->halt_poll_success_ns += poll_ns; 3684 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); 3685 } else { 3686 stats->halt_poll_fail_ns += poll_ns; 3687 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); 3688 } 3689 } 3690 3691 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu) 3692 { 3693 struct kvm *kvm = vcpu->kvm; 3694 3695 if (kvm->override_halt_poll_ns) { 3696 /* 3697 * Ensure kvm->max_halt_poll_ns is not read before 3698 * kvm->override_halt_poll_ns. 3699 * 3700 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL. 3701 */ 3702 smp_rmb(); 3703 return READ_ONCE(kvm->max_halt_poll_ns); 3704 } 3705 3706 return READ_ONCE(halt_poll_ns); 3707 } 3708 3709 /* 3710 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt 3711 * polling is enabled, busy wait for a short time before blocking to avoid the 3712 * expensive block+unblock sequence if a wake event arrives soon after the vCPU 3713 * is halted. 3714 */ 3715 void kvm_vcpu_halt(struct kvm_vcpu *vcpu) 3716 { 3717 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3718 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); 3719 ktime_t start, cur, poll_end; 3720 bool waited = false; 3721 bool do_halt_poll; 3722 u64 halt_ns; 3723 3724 if (vcpu->halt_poll_ns > max_halt_poll_ns) 3725 vcpu->halt_poll_ns = max_halt_poll_ns; 3726 3727 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; 3728 3729 start = cur = poll_end = ktime_get(); 3730 if (do_halt_poll) { 3731 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); 3732 3733 do { 3734 if (kvm_vcpu_check_block(vcpu) < 0) 3735 goto out; 3736 cpu_relax(); 3737 poll_end = cur = ktime_get(); 3738 } while (kvm_vcpu_can_poll(cur, stop)); 3739 } 3740 3741 waited = kvm_vcpu_block(vcpu); 3742 3743 cur = ktime_get(); 3744 if (waited) { 3745 vcpu->stat.generic.halt_wait_ns += 3746 ktime_to_ns(cur) - ktime_to_ns(poll_end); 3747 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, 3748 ktime_to_ns(cur) - ktime_to_ns(poll_end)); 3749 } 3750 out: 3751 /* The total time the vCPU was "halted", including polling time. */ 3752 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3753 3754 /* 3755 * Note, halt-polling is considered successful so long as the vCPU was 3756 * never actually scheduled out, i.e. 
even if the wake event arrived 3757 * after of the halt-polling loop itself, but before the full wait. 3758 */ 3759 if (do_halt_poll) 3760 update_halt_poll_stats(vcpu, start, poll_end, !waited); 3761 3762 if (halt_poll_allowed) { 3763 /* Recompute the max halt poll time in case it changed. */ 3764 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu); 3765 3766 if (!vcpu_valid_wakeup(vcpu)) { 3767 shrink_halt_poll_ns(vcpu); 3768 } else if (max_halt_poll_ns) { 3769 if (halt_ns <= vcpu->halt_poll_ns) 3770 ; 3771 /* we had a long block, shrink polling */ 3772 else if (vcpu->halt_poll_ns && 3773 halt_ns > max_halt_poll_ns) 3774 shrink_halt_poll_ns(vcpu); 3775 /* we had a short halt and our poll time is too small */ 3776 else if (vcpu->halt_poll_ns < max_halt_poll_ns && 3777 halt_ns < max_halt_poll_ns) 3778 grow_halt_poll_ns(vcpu); 3779 } else { 3780 vcpu->halt_poll_ns = 0; 3781 } 3782 } 3783 3784 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); 3785 } 3786 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 3787 3788 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3789 { 3790 if (__kvm_vcpu_wake_up(vcpu)) { 3791 WRITE_ONCE(vcpu->ready, true); 3792 ++vcpu->stat.generic.halt_wakeup; 3793 return true; 3794 } 3795 3796 return false; 3797 } 3798 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3799 3800 #ifndef CONFIG_S390 3801 /* 3802 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3803 */ 3804 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait) 3805 { 3806 int me, cpu; 3807 3808 if (kvm_vcpu_wake_up(vcpu)) 3809 return; 3810 3811 me = get_cpu(); 3812 /* 3813 * The only state change done outside the vcpu mutex is IN_GUEST_MODE 3814 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should 3815 * kick" check does not need atomic operations if kvm_vcpu_kick is used 3816 * within the vCPU thread itself. 3817 */ 3818 if (vcpu == __this_cpu_read(kvm_running_vcpu)) { 3819 if (vcpu->mode == IN_GUEST_MODE) 3820 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); 3821 goto out; 3822 } 3823 3824 /* 3825 * Note, the vCPU could get migrated to a different pCPU at any point 3826 * after kvm_arch_vcpu_should_kick(), which could result in sending an 3827 * IPI to the previous pCPU. But, that's ok because the purpose of the 3828 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3829 * vCPU also requires it to leave IN_GUEST_MODE. 3830 */ 3831 if (kvm_arch_vcpu_should_kick(vcpu)) { 3832 cpu = READ_ONCE(vcpu->cpu); 3833 if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) { 3834 /* 3835 * Use a reschedule IPI to kick the vCPU if the caller 3836 * doesn't need to wait for a response, as KVM allows 3837 * kicking vCPUs while IRQs are disabled, but using the 3838 * SMP function call framework with IRQs disabled can 3839 * deadlock due to taking cross-CPU locks. 
3840 */ 3841 if (wait) 3842 smp_call_function_single(cpu, ack_kick, NULL, wait); 3843 else 3844 smp_send_reschedule(cpu); 3845 } 3846 } 3847 out: 3848 put_cpu(); 3849 } 3850 EXPORT_SYMBOL_GPL(__kvm_vcpu_kick); 3851 #endif /* !CONFIG_S390 */ 3852 3853 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3854 { 3855 struct task_struct *task = NULL; 3856 int ret; 3857 3858 if (!read_trylock(&target->pid_lock)) 3859 return 0; 3860 3861 if (target->pid) 3862 task = get_pid_task(target->pid, PIDTYPE_PID); 3863 3864 read_unlock(&target->pid_lock); 3865 3866 if (!task) 3867 return 0; 3868 ret = yield_to(task, 1); 3869 put_task_struct(task); 3870 3871 return ret; 3872 } 3873 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3874 3875 /* 3876 * Helper that checks whether a VCPU is eligible for directed yield. 3877 * The most eligible candidate to yield to is decided by the following heuristics: 3878 * 3879 * (a) A VCPU which has not done a pl-exit or had cpu relax intercepted recently 3880 * (preempted lock holder), indicated by @in_spin_loop. 3881 * Set at the beginning and cleared at the end of interception/PLE handler. 3882 * 3883 * (b) A VCPU which has done a pl-exit/cpu relax intercept but did not get 3884 * a chance last time (mostly it has become eligible now since we have probably 3885 * yielded to the lockholder in the last iteration. This is done by toggling 3886 * @dy_eligible each time a VCPU is checked for eligibility.) 3887 * 3888 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3889 * to a preempted lock-holder could result in wrong VCPU selection and CPU 3890 * burning. Giving priority to a potential lock-holder increases lock 3891 * progress. 3892 * 3893 * Since the algorithm is based on heuristics, accessing another VCPU's data without 3894 * locking does no harm. It may result in trying to yield to the same VCPU, fail 3895 * and continue with the next VCPU and so on. 3896 */ 3897 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3898 { 3899 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3900 bool eligible; 3901 3902 eligible = !vcpu->spin_loop.in_spin_loop || 3903 vcpu->spin_loop.dy_eligible; 3904 3905 if (vcpu->spin_loop.in_spin_loop) 3906 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3907 3908 return eligible; 3909 #else 3910 return true; 3911 #endif 3912 } 3913 3914 /* 3915 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3916 * a vcpu_load/vcpu_put pair. However, for most architectures 3917 * kvm_arch_vcpu_runnable does not require vcpu_load. 3918 */ 3919 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3920 { 3921 return kvm_arch_vcpu_runnable(vcpu); 3922 } 3923 3924 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3925 { 3926 if (kvm_arch_dy_runnable(vcpu)) 3927 return true; 3928 3929 #ifdef CONFIG_KVM_ASYNC_PF 3930 if (!list_empty_careful(&vcpu->async_pf.done)) 3931 return true; 3932 #endif 3933 3934 return false; 3935 } 3936 3937 /* 3938 * By default, simply query the target vCPU's current mode when checking if a 3939 * vCPU was preempted in kernel mode. All architectures except x86 (or more 3940 * specifically, except VMX) allow querying whether or not a vCPU is in kernel 3941 * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel() 3942 * directly for cross-vCPU checks is functionally correct and accurate.
3943 */ 3944 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) 3945 { 3946 return kvm_arch_vcpu_in_kernel(vcpu); 3947 } 3948 3949 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3950 { 3951 return false; 3952 } 3953 3954 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3955 { 3956 int nr_vcpus, start, i, idx, yielded; 3957 struct kvm *kvm = me->kvm; 3958 struct kvm_vcpu *vcpu; 3959 int try = 3; 3960 3961 nr_vcpus = atomic_read(&kvm->online_vcpus); 3962 if (nr_vcpus < 2) 3963 return; 3964 3965 /* Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(). */ 3966 smp_rmb(); 3967 3968 kvm_vcpu_set_in_spin_loop(me, true); 3969 3970 /* 3971 * The current vCPU ("me") is spinning in kernel mode, i.e. is likely 3972 * waiting for a resource to become available. Attempt to yield to a 3973 * vCPU that is runnable, but not currently running, e.g. because the 3974 * vCPU was preempted by a higher priority task. With luck, the vCPU 3975 * that was preempted is holding a lock or some other resource that the 3976 * current vCPU is waiting to acquire, and yielding to the other vCPU 3977 * will allow it to make forward progress and release the lock (or kick 3978 * the spinning vCPU, etc). 3979 * 3980 * Since KVM has no insight into what exactly the guest is doing, 3981 * approximate a round-robin selection by iterating over all vCPUs, 3982 * starting at the last boosted vCPU. I.e. if N=kvm->last_boosted_vcpu, 3983 * iterate over vCPU[N+1]..vCPU[N-1], wrapping as needed. 3984 * 3985 * Note, this is inherently racy, e.g. if multiple vCPUs are spinning, 3986 * they may all try to yield to the same vCPU(s). But as above, this 3987 * is all best effort due to KVM's lack of visibility into the guest. 3988 */ 3989 start = READ_ONCE(kvm->last_boosted_vcpu) + 1; 3990 for (i = 0; i < nr_vcpus; i++) { 3991 idx = (start + i) % nr_vcpus; 3992 if (idx == me->vcpu_idx) 3993 continue; 3994 3995 vcpu = xa_load(&kvm->vcpu_array, idx); 3996 if (!READ_ONCE(vcpu->ready)) 3997 continue; 3998 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) 3999 continue; 4000 4001 /* 4002 * Treat the target vCPU as being in-kernel if it has a pending 4003 * interrupt, as the vCPU trying to yield may be spinning 4004 * waiting on IPI delivery, i.e. the target vCPU is in-kernel 4005 * for the purposes of directed yield. 
4006 */ 4007 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 4008 !kvm_arch_dy_has_pending_interrupt(vcpu) && 4009 !kvm_arch_vcpu_preempted_in_kernel(vcpu)) 4010 continue; 4011 4012 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 4013 continue; 4014 4015 yielded = kvm_vcpu_yield_to(vcpu); 4016 if (yielded > 0) { 4017 WRITE_ONCE(kvm->last_boosted_vcpu, idx); 4018 break; 4019 } else if (yielded < 0 && !--try) { 4020 break; 4021 } 4022 } 4023 kvm_vcpu_set_in_spin_loop(me, false); 4024 4025 /* Ensure vcpu is not eligible during next spinloop */ 4026 kvm_vcpu_set_dy_eligible(me, false); 4027 } 4028 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 4029 4030 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 4031 { 4032 #ifdef CONFIG_HAVE_KVM_DIRTY_RING 4033 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 4034 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 4035 kvm->dirty_ring_size / PAGE_SIZE); 4036 #else 4037 return false; 4038 #endif 4039 } 4040 4041 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 4042 { 4043 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 4044 struct page *page; 4045 4046 if (vmf->pgoff == 0) 4047 page = virt_to_page(vcpu->run); 4048 #ifdef CONFIG_X86 4049 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 4050 page = virt_to_page(vcpu->arch.pio_data); 4051 #endif 4052 #ifdef CONFIG_KVM_MMIO 4053 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 4054 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 4055 #endif 4056 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 4057 page = kvm_dirty_ring_get_page( 4058 &vcpu->dirty_ring, 4059 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 4060 else 4061 return kvm_arch_vcpu_fault(vcpu, vmf); 4062 get_page(page); 4063 vmf->page = page; 4064 return 0; 4065 } 4066 4067 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 4068 .fault = kvm_vcpu_fault, 4069 }; 4070 4071 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 4072 { 4073 struct kvm_vcpu *vcpu = file->private_data; 4074 unsigned long pages = vma_pages(vma); 4075 4076 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 4077 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 4078 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 4079 return -EINVAL; 4080 4081 vma->vm_ops = &kvm_vcpu_vm_ops; 4082 return 0; 4083 } 4084 4085 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 4086 { 4087 struct kvm_vcpu *vcpu = filp->private_data; 4088 4089 kvm_put_kvm(vcpu->kvm); 4090 return 0; 4091 } 4092 4093 static struct file_operations kvm_vcpu_fops = { 4094 .release = kvm_vcpu_release, 4095 .unlocked_ioctl = kvm_vcpu_ioctl, 4096 .mmap = kvm_vcpu_mmap, 4097 .llseek = noop_llseek, 4098 KVM_COMPAT(kvm_vcpu_compat_ioctl), 4099 }; 4100 4101 /* 4102 * Allocates an inode for the vcpu.
4103 */ 4104 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 4105 { 4106 char name[8 + 1 + ITOA_MAX_LEN + 1]; 4107 4108 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 4109 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 4110 } 4111 4112 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 4113 static int vcpu_get_pid(void *data, u64 *val) 4114 { 4115 struct kvm_vcpu *vcpu = data; 4116 4117 read_lock(&vcpu->pid_lock); 4118 *val = pid_nr(vcpu->pid); 4119 read_unlock(&vcpu->pid_lock); 4120 return 0; 4121 } 4122 4123 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n"); 4124 4125 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 4126 { 4127 struct dentry *debugfs_dentry; 4128 char dir_name[ITOA_MAX_LEN * 2]; 4129 4130 if (!debugfs_initialized()) 4131 return; 4132 4133 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 4134 debugfs_dentry = debugfs_create_dir(dir_name, 4135 vcpu->kvm->debugfs_dentry); 4136 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, 4137 &vcpu_get_pid_fops); 4138 4139 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 4140 } 4141 #endif 4142 4143 /* 4144 * Creates some virtual cpus. Good luck creating more than one. 4145 */ 4146 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id) 4147 { 4148 int r; 4149 struct kvm_vcpu *vcpu; 4150 struct page *page; 4151 4152 /* 4153 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject 4154 * too-large values instead of silently truncating. 4155 * 4156 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first 4157 * changing the storage type (at the very least, IDs should be tracked 4158 * as unsigned ints). 4159 */ 4160 BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX); 4161 if (id >= KVM_MAX_VCPU_IDS) 4162 return -EINVAL; 4163 4164 mutex_lock(&kvm->lock); 4165 if (kvm->created_vcpus >= kvm->max_vcpus) { 4166 mutex_unlock(&kvm->lock); 4167 return -EINVAL; 4168 } 4169 4170 r = kvm_arch_vcpu_precreate(kvm, id); 4171 if (r) { 4172 mutex_unlock(&kvm->lock); 4173 return r; 4174 } 4175 4176 kvm->created_vcpus++; 4177 mutex_unlock(&kvm->lock); 4178 4179 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 4180 if (!vcpu) { 4181 r = -ENOMEM; 4182 goto vcpu_decrement; 4183 } 4184 4185 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 4186 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 4187 if (!page) { 4188 r = -ENOMEM; 4189 goto vcpu_free; 4190 } 4191 vcpu->run = page_address(page); 4192 4193 kvm_vcpu_init(vcpu, kvm, id); 4194 4195 r = kvm_arch_vcpu_create(vcpu); 4196 if (r) 4197 goto vcpu_free_run_page; 4198 4199 if (kvm->dirty_ring_size) { 4200 r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring, 4201 id, kvm->dirty_ring_size); 4202 if (r) 4203 goto arch_vcpu_destroy; 4204 } 4205 4206 mutex_lock(&kvm->lock); 4207 4208 if (kvm_get_vcpu_by_id(kvm, id)) { 4209 r = -EEXIST; 4210 goto unlock_vcpu_destroy; 4211 } 4212 4213 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 4214 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); 4215 WARN_ON_ONCE(r == -EBUSY); 4216 if (r) 4217 goto unlock_vcpu_destroy; 4218 4219 /* 4220 * Now it's all set up, let userspace reach it. Grab the vCPU's mutex 4221 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully 4222 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked 4223 * into a NULL-pointer dereference because KVM thinks the _current_ 4224 * vCPU doesn't exist. As a bonus, taking vcpu->mutex ensures lockdep 4225 * knows it's taken *inside* kvm->lock. 
4226 */ 4227 mutex_lock(&vcpu->mutex); 4228 kvm_get_kvm(kvm); 4229 r = create_vcpu_fd(vcpu); 4230 if (r < 0) 4231 goto kvm_put_xa_erase; 4232 4233 /* 4234 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu 4235 * pointer before kvm->online_vcpu's incremented value. 4236 */ 4237 smp_wmb(); 4238 atomic_inc(&kvm->online_vcpus); 4239 mutex_unlock(&vcpu->mutex); 4240 4241 mutex_unlock(&kvm->lock); 4242 kvm_arch_vcpu_postcreate(vcpu); 4243 kvm_create_vcpu_debugfs(vcpu); 4244 return r; 4245 4246 kvm_put_xa_erase: 4247 mutex_unlock(&vcpu->mutex); 4248 kvm_put_kvm_no_destroy(kvm); 4249 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); 4250 unlock_vcpu_destroy: 4251 mutex_unlock(&kvm->lock); 4252 kvm_dirty_ring_free(&vcpu->dirty_ring); 4253 arch_vcpu_destroy: 4254 kvm_arch_vcpu_destroy(vcpu); 4255 vcpu_free_run_page: 4256 free_page((unsigned long)vcpu->run); 4257 vcpu_free: 4258 kmem_cache_free(kvm_vcpu_cache, vcpu); 4259 vcpu_decrement: 4260 mutex_lock(&kvm->lock); 4261 kvm->created_vcpus--; 4262 mutex_unlock(&kvm->lock); 4263 return r; 4264 } 4265 4266 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 4267 { 4268 if (sigset) { 4269 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 4270 vcpu->sigset_active = 1; 4271 vcpu->sigset = *sigset; 4272 } else 4273 vcpu->sigset_active = 0; 4274 return 0; 4275 } 4276 4277 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 4278 size_t size, loff_t *offset) 4279 { 4280 struct kvm_vcpu *vcpu = file->private_data; 4281 4282 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 4283 &kvm_vcpu_stats_desc[0], &vcpu->stat, 4284 sizeof(vcpu->stat), user_buffer, size, offset); 4285 } 4286 4287 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file) 4288 { 4289 struct kvm_vcpu *vcpu = file->private_data; 4290 4291 kvm_put_kvm(vcpu->kvm); 4292 return 0; 4293 } 4294 4295 static const struct file_operations kvm_vcpu_stats_fops = { 4296 .owner = THIS_MODULE, 4297 .read = kvm_vcpu_stats_read, 4298 .release = kvm_vcpu_stats_release, 4299 .llseek = noop_llseek, 4300 }; 4301 4302 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 4303 { 4304 int fd; 4305 struct file *file; 4306 char name[15 + ITOA_MAX_LEN + 1]; 4307 4308 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 4309 4310 fd = get_unused_fd_flags(O_CLOEXEC); 4311 if (fd < 0) 4312 return fd; 4313 4314 file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu, 4315 O_RDONLY, FMODE_PREAD); 4316 if (IS_ERR(file)) { 4317 put_unused_fd(fd); 4318 return PTR_ERR(file); 4319 } 4320 4321 kvm_get_kvm(vcpu->kvm); 4322 fd_install(fd, file); 4323 4324 return fd; 4325 } 4326 4327 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY 4328 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu, 4329 struct kvm_pre_fault_memory *range) 4330 { 4331 int idx; 4332 long r; 4333 u64 full_size; 4334 4335 if (range->flags) 4336 return -EINVAL; 4337 4338 if (!PAGE_ALIGNED(range->gpa) || 4339 !PAGE_ALIGNED(range->size) || 4340 range->gpa + range->size <= range->gpa) 4341 return -EINVAL; 4342 4343 vcpu_load(vcpu); 4344 idx = srcu_read_lock(&vcpu->kvm->srcu); 4345 4346 full_size = range->size; 4347 do { 4348 if (signal_pending(current)) { 4349 r = -EINTR; 4350 break; 4351 } 4352 4353 r = kvm_arch_vcpu_pre_fault_memory(vcpu, range); 4354 if (WARN_ON_ONCE(r == 0 || r == -EIO)) 4355 break; 4356 4357 if (r < 0) 4358 break; 4359 4360 range->size -= r; 4361 range->gpa += r; 4362 cond_resched(); 4363 } while (range->size); 4364 4365 
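	/*
	 * At this point @range has been advanced past whatever was pre-faulted
	 * successfully: gpa/size now describe only the leftover region, and r
	 * holds the result of the last kvm_arch_vcpu_pre_fault_memory() call
	 * (a negative errno on failure, or the byte count returned by the
	 * final iteration if the loop consumed the entire range).
	 */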
srcu_read_unlock(&vcpu->kvm->srcu, idx); 4366 vcpu_put(vcpu); 4367 4368 /* Return success if at least one page was mapped successfully. */ 4369 return full_size == range->size ? r : 0; 4370 } 4371 #endif 4372 4373 static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu) 4374 { 4375 struct kvm *kvm = vcpu->kvm; 4376 4377 /* 4378 * In practice, this happy path will always be taken, as a well-behaved 4379 * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns. 4380 */ 4381 if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus))) 4382 return 0; 4383 4384 /* 4385 * Acquire and release the vCPU's mutex to wait for vCPU creation to 4386 * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU 4387 * is fully online). 4388 */ 4389 if (mutex_lock_killable(&vcpu->mutex)) 4390 return -EINTR; 4391 4392 mutex_unlock(&vcpu->mutex); 4393 4394 if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx))) 4395 return -EIO; 4396 4397 return 0; 4398 } 4399 4400 static long kvm_vcpu_ioctl(struct file *filp, 4401 unsigned int ioctl, unsigned long arg) 4402 { 4403 struct kvm_vcpu *vcpu = filp->private_data; 4404 void __user *argp = (void __user *)arg; 4405 int r; 4406 struct kvm_fpu *fpu = NULL; 4407 struct kvm_sregs *kvm_sregs = NULL; 4408 4409 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4410 return -EIO; 4411 4412 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 4413 return -EINVAL; 4414 4415 /* 4416 * Wait for the vCPU to be online before handling the ioctl(), as KVM 4417 * assumes the vCPU is reachable via vcpu_array, i.e. may dereference 4418 * a NULL pointer if userspace invokes an ioctl() before KVM is ready. 4419 */ 4420 r = kvm_wait_for_vcpu_online(vcpu); 4421 if (r) 4422 return r; 4423 4424 /* 4425 * Some architectures have vcpu ioctls that are asynchronous to vcpu 4426 * execution; mutex_lock() would break them. 4427 */ 4428 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 4429 if (r != -ENOIOCTLCMD) 4430 return r; 4431 4432 if (mutex_lock_killable(&vcpu->mutex)) 4433 return -EINTR; 4434 switch (ioctl) { 4435 case KVM_RUN: { 4436 struct pid *oldpid; 4437 r = -EINVAL; 4438 if (arg) 4439 goto out; 4440 4441 /* 4442 * Note, vcpu->pid is primarily protected by vcpu->mutex. The 4443 * dedicated r/w lock allows other tasks, e.g. other vCPUs, to 4444 * read vcpu->pid while this vCPU is in KVM_RUN, e.g. to yield 4445 * directly to this vCPU 4446 */ 4447 oldpid = vcpu->pid; 4448 if (unlikely(oldpid != task_pid(current))) { 4449 /* The thread running this VCPU changed. 
*/ 4450 struct pid *newpid; 4451 4452 r = kvm_arch_vcpu_run_pid_change(vcpu); 4453 if (r) 4454 break; 4455 4456 newpid = get_task_pid(current, PIDTYPE_PID); 4457 write_lock(&vcpu->pid_lock); 4458 vcpu->pid = newpid; 4459 write_unlock(&vcpu->pid_lock); 4460 4461 put_pid(oldpid); 4462 } 4463 vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe); 4464 r = kvm_arch_vcpu_ioctl_run(vcpu); 4465 vcpu->wants_to_run = false; 4466 4467 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 4468 break; 4469 } 4470 case KVM_GET_REGS: { 4471 struct kvm_regs *kvm_regs; 4472 4473 r = -ENOMEM; 4474 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 4475 if (!kvm_regs) 4476 goto out; 4477 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 4478 if (r) 4479 goto out_free1; 4480 r = -EFAULT; 4481 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 4482 goto out_free1; 4483 r = 0; 4484 out_free1: 4485 kfree(kvm_regs); 4486 break; 4487 } 4488 case KVM_SET_REGS: { 4489 struct kvm_regs *kvm_regs; 4490 4491 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 4492 if (IS_ERR(kvm_regs)) { 4493 r = PTR_ERR(kvm_regs); 4494 goto out; 4495 } 4496 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 4497 kfree(kvm_regs); 4498 break; 4499 } 4500 case KVM_GET_SREGS: { 4501 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 4502 r = -ENOMEM; 4503 if (!kvm_sregs) 4504 goto out; 4505 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 4506 if (r) 4507 goto out; 4508 r = -EFAULT; 4509 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 4510 goto out; 4511 r = 0; 4512 break; 4513 } 4514 case KVM_SET_SREGS: { 4515 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 4516 if (IS_ERR(kvm_sregs)) { 4517 r = PTR_ERR(kvm_sregs); 4518 kvm_sregs = NULL; 4519 goto out; 4520 } 4521 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 4522 break; 4523 } 4524 case KVM_GET_MP_STATE: { 4525 struct kvm_mp_state mp_state; 4526 4527 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 4528 if (r) 4529 goto out; 4530 r = -EFAULT; 4531 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 4532 goto out; 4533 r = 0; 4534 break; 4535 } 4536 case KVM_SET_MP_STATE: { 4537 struct kvm_mp_state mp_state; 4538 4539 r = -EFAULT; 4540 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 4541 goto out; 4542 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 4543 break; 4544 } 4545 case KVM_TRANSLATE: { 4546 struct kvm_translation tr; 4547 4548 r = -EFAULT; 4549 if (copy_from_user(&tr, argp, sizeof(tr))) 4550 goto out; 4551 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 4552 if (r) 4553 goto out; 4554 r = -EFAULT; 4555 if (copy_to_user(argp, &tr, sizeof(tr))) 4556 goto out; 4557 r = 0; 4558 break; 4559 } 4560 case KVM_SET_GUEST_DEBUG: { 4561 struct kvm_guest_debug dbg; 4562 4563 r = -EFAULT; 4564 if (copy_from_user(&dbg, argp, sizeof(dbg))) 4565 goto out; 4566 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 4567 break; 4568 } 4569 case KVM_SET_SIGNAL_MASK: { 4570 struct kvm_signal_mask __user *sigmask_arg = argp; 4571 struct kvm_signal_mask kvm_sigmask; 4572 sigset_t sigset, *p; 4573 4574 p = NULL; 4575 if (argp) { 4576 r = -EFAULT; 4577 if (copy_from_user(&kvm_sigmask, argp, 4578 sizeof(kvm_sigmask))) 4579 goto out; 4580 r = -EINVAL; 4581 if (kvm_sigmask.len != sizeof(sigset)) 4582 goto out; 4583 r = -EFAULT; 4584 if (copy_from_user(&sigset, sigmask_arg->sigset, 4585 sizeof(sigset))) 4586 goto out; 4587 p = &sigset; 4588 } 4589 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 4590 break; 4591 } 4592 case KVM_GET_FPU: { 4593 
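		/*
		 * Same pattern as KVM_GET_SREGS above: allocate a scratch
		 * struct kvm_fpu, have the arch code fill it, then copy it out
		 * to userspace.  The buffer is freed at the common "out:"
		 * label, which is why @fpu is declared at function scope.
		 */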
fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 4594 r = -ENOMEM; 4595 if (!fpu) 4596 goto out; 4597 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 4598 if (r) 4599 goto out; 4600 r = -EFAULT; 4601 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 4602 goto out; 4603 r = 0; 4604 break; 4605 } 4606 case KVM_SET_FPU: { 4607 fpu = memdup_user(argp, sizeof(*fpu)); 4608 if (IS_ERR(fpu)) { 4609 r = PTR_ERR(fpu); 4610 fpu = NULL; 4611 goto out; 4612 } 4613 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 4614 break; 4615 } 4616 case KVM_GET_STATS_FD: { 4617 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 4618 break; 4619 } 4620 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY 4621 case KVM_PRE_FAULT_MEMORY: { 4622 struct kvm_pre_fault_memory range; 4623 4624 r = -EFAULT; 4625 if (copy_from_user(&range, argp, sizeof(range))) 4626 break; 4627 r = kvm_vcpu_pre_fault_memory(vcpu, &range); 4628 /* Pass back leftover range. */ 4629 if (copy_to_user(argp, &range, sizeof(range))) 4630 r = -EFAULT; 4631 break; 4632 } 4633 #endif 4634 default: 4635 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 4636 } 4637 out: 4638 mutex_unlock(&vcpu->mutex); 4639 kfree(fpu); 4640 kfree(kvm_sregs); 4641 return r; 4642 } 4643 4644 #ifdef CONFIG_KVM_COMPAT 4645 static long kvm_vcpu_compat_ioctl(struct file *filp, 4646 unsigned int ioctl, unsigned long arg) 4647 { 4648 struct kvm_vcpu *vcpu = filp->private_data; 4649 void __user *argp = compat_ptr(arg); 4650 int r; 4651 4652 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead) 4653 return -EIO; 4654 4655 switch (ioctl) { 4656 case KVM_SET_SIGNAL_MASK: { 4657 struct kvm_signal_mask __user *sigmask_arg = argp; 4658 struct kvm_signal_mask kvm_sigmask; 4659 sigset_t sigset; 4660 4661 if (argp) { 4662 r = -EFAULT; 4663 if (copy_from_user(&kvm_sigmask, argp, 4664 sizeof(kvm_sigmask))) 4665 goto out; 4666 r = -EINVAL; 4667 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 4668 goto out; 4669 r = -EFAULT; 4670 if (get_compat_sigset(&sigset, 4671 (compat_sigset_t __user *)sigmask_arg->sigset)) 4672 goto out; 4673 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 4674 } else 4675 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 4676 break; 4677 } 4678 default: 4679 r = kvm_vcpu_ioctl(filp, ioctl, arg); 4680 } 4681 4682 out: 4683 return r; 4684 } 4685 #endif 4686 4687 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 4688 { 4689 struct kvm_device *dev = filp->private_data; 4690 4691 if (dev->ops->mmap) 4692 return dev->ops->mmap(dev, vma); 4693 4694 return -ENODEV; 4695 } 4696 4697 static int kvm_device_ioctl_attr(struct kvm_device *dev, 4698 int (*accessor)(struct kvm_device *dev, 4699 struct kvm_device_attr *attr), 4700 unsigned long arg) 4701 { 4702 struct kvm_device_attr attr; 4703 4704 if (!accessor) 4705 return -EPERM; 4706 4707 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4708 return -EFAULT; 4709 4710 return accessor(dev, &attr); 4711 } 4712 4713 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 4714 unsigned long arg) 4715 { 4716 struct kvm_device *dev = filp->private_data; 4717 4718 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead) 4719 return -EIO; 4720 4721 switch (ioctl) { 4722 case KVM_SET_DEVICE_ATTR: 4723 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 4724 case KVM_GET_DEVICE_ATTR: 4725 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 4726 case KVM_HAS_DEVICE_ATTR: 4727 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 4728 default: 4729 if (dev->ops->ioctl) 4730 return dev->ops->ioctl(dev, ioctl, arg); 
4731 4732 return -ENOTTY; 4733 } 4734 } 4735 4736 static int kvm_device_release(struct inode *inode, struct file *filp) 4737 { 4738 struct kvm_device *dev = filp->private_data; 4739 struct kvm *kvm = dev->kvm; 4740 4741 if (dev->ops->release) { 4742 mutex_lock(&kvm->lock); 4743 list_del_rcu(&dev->vm_node); 4744 synchronize_rcu(); 4745 dev->ops->release(dev); 4746 mutex_unlock(&kvm->lock); 4747 } 4748 4749 kvm_put_kvm(kvm); 4750 return 0; 4751 } 4752 4753 static struct file_operations kvm_device_fops = { 4754 .unlocked_ioctl = kvm_device_ioctl, 4755 .release = kvm_device_release, 4756 KVM_COMPAT(kvm_device_ioctl), 4757 .mmap = kvm_device_mmap, 4758 }; 4759 4760 struct kvm_device *kvm_device_from_filp(struct file *filp) 4761 { 4762 if (filp->f_op != &kvm_device_fops) 4763 return NULL; 4764 4765 return filp->private_data; 4766 } 4767 4768 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 4769 #ifdef CONFIG_KVM_MPIC 4770 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 4771 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 4772 #endif 4773 }; 4774 4775 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 4776 { 4777 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 4778 return -ENOSPC; 4779 4780 if (kvm_device_ops_table[type] != NULL) 4781 return -EEXIST; 4782 4783 kvm_device_ops_table[type] = ops; 4784 return 0; 4785 } 4786 4787 void kvm_unregister_device_ops(u32 type) 4788 { 4789 if (kvm_device_ops_table[type] != NULL) 4790 kvm_device_ops_table[type] = NULL; 4791 } 4792 4793 static int kvm_ioctl_create_device(struct kvm *kvm, 4794 struct kvm_create_device *cd) 4795 { 4796 const struct kvm_device_ops *ops; 4797 struct kvm_device *dev; 4798 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 4799 int type; 4800 int ret; 4801 4802 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 4803 return -ENODEV; 4804 4805 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 4806 ops = kvm_device_ops_table[type]; 4807 if (ops == NULL) 4808 return -ENODEV; 4809 4810 if (test) 4811 return 0; 4812 4813 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 4814 if (!dev) 4815 return -ENOMEM; 4816 4817 dev->ops = ops; 4818 dev->kvm = kvm; 4819 4820 mutex_lock(&kvm->lock); 4821 ret = ops->create(dev, type); 4822 if (ret < 0) { 4823 mutex_unlock(&kvm->lock); 4824 kfree(dev); 4825 return ret; 4826 } 4827 list_add_rcu(&dev->vm_node, &kvm->devices); 4828 mutex_unlock(&kvm->lock); 4829 4830 if (ops->init) 4831 ops->init(dev); 4832 4833 kvm_get_kvm(kvm); 4834 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 4835 if (ret < 0) { 4836 kvm_put_kvm_no_destroy(kvm); 4837 mutex_lock(&kvm->lock); 4838 list_del_rcu(&dev->vm_node); 4839 synchronize_rcu(); 4840 if (ops->release) 4841 ops->release(dev); 4842 mutex_unlock(&kvm->lock); 4843 if (ops->destroy) 4844 ops->destroy(dev); 4845 return ret; 4846 } 4847 4848 cd->fd = ret; 4849 return 0; 4850 } 4851 4852 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4853 { 4854 switch (arg) { 4855 case KVM_CAP_USER_MEMORY: 4856 case KVM_CAP_USER_MEMORY2: 4857 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4858 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4859 case KVM_CAP_INTERNAL_ERROR_DATA: 4860 #ifdef CONFIG_HAVE_KVM_MSI 4861 case KVM_CAP_SIGNAL_MSI: 4862 #endif 4863 #ifdef CONFIG_HAVE_KVM_IRQCHIP 4864 case KVM_CAP_IRQFD: 4865 #endif 4866 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4867 case KVM_CAP_CHECK_EXTENSION_VM: 4868 case KVM_CAP_ENABLE_CAP_VM: 4869 case KVM_CAP_HALT_POLL: 4870 return 1; 4871 #ifdef 
CONFIG_KVM_MMIO 4872 case KVM_CAP_COALESCED_MMIO: 4873 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4874 case KVM_CAP_COALESCED_PIO: 4875 return 1; 4876 #endif 4877 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4878 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4879 return KVM_DIRTY_LOG_MANUAL_CAPS; 4880 #endif 4881 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4882 case KVM_CAP_IRQ_ROUTING: 4883 return KVM_MAX_IRQ_ROUTES; 4884 #endif 4885 #if KVM_MAX_NR_ADDRESS_SPACES > 1 4886 case KVM_CAP_MULTI_ADDRESS_SPACE: 4887 if (kvm) 4888 return kvm_arch_nr_memslot_as_ids(kvm); 4889 return KVM_MAX_NR_ADDRESS_SPACES; 4890 #endif 4891 case KVM_CAP_NR_MEMSLOTS: 4892 return KVM_USER_MEM_SLOTS; 4893 case KVM_CAP_DIRTY_LOG_RING: 4894 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO 4895 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4896 #else 4897 return 0; 4898 #endif 4899 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 4900 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL 4901 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4902 #else 4903 return 0; 4904 #endif 4905 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP 4906 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: 4907 #endif 4908 case KVM_CAP_BINARY_STATS_FD: 4909 case KVM_CAP_SYSTEM_EVENT_DATA: 4910 case KVM_CAP_DEVICE_CTRL: 4911 return 1; 4912 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES 4913 case KVM_CAP_MEMORY_ATTRIBUTES: 4914 return kvm_supported_mem_attributes(kvm); 4915 #endif 4916 #ifdef CONFIG_KVM_PRIVATE_MEM 4917 case KVM_CAP_GUEST_MEMFD: 4918 return !kvm || kvm_arch_has_private_mem(kvm); 4919 #endif 4920 default: 4921 break; 4922 } 4923 return kvm_vm_ioctl_check_extension(kvm, arg); 4924 } 4925 4926 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4927 { 4928 int r; 4929 4930 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4931 return -EINVAL; 4932 4933 /* the size should be power of 2 */ 4934 if (!size || (size & (size - 1))) 4935 return -EINVAL; 4936 4937 /* Should be bigger to keep the reserved entries, or a page */ 4938 if (size < kvm_dirty_ring_get_rsvd_entries(kvm) * 4939 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4940 return -EINVAL; 4941 4942 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4943 sizeof(struct kvm_dirty_gfn)) 4944 return -E2BIG; 4945 4946 /* We only allow it to set once */ 4947 if (kvm->dirty_ring_size) 4948 return -EINVAL; 4949 4950 mutex_lock(&kvm->lock); 4951 4952 if (kvm->created_vcpus) { 4953 /* We don't allow to change this value after vcpu created */ 4954 r = -EINVAL; 4955 } else { 4956 kvm->dirty_ring_size = size; 4957 r = 0; 4958 } 4959 4960 mutex_unlock(&kvm->lock); 4961 return r; 4962 } 4963 4964 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4965 { 4966 unsigned long i; 4967 struct kvm_vcpu *vcpu; 4968 int cleared = 0; 4969 4970 if (!kvm->dirty_ring_size) 4971 return -EINVAL; 4972 4973 mutex_lock(&kvm->slots_lock); 4974 4975 kvm_for_each_vcpu(i, vcpu, kvm) 4976 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4977 4978 mutex_unlock(&kvm->slots_lock); 4979 4980 if (cleared) 4981 kvm_flush_remote_tlbs(kvm); 4982 4983 return cleared; 4984 } 4985 4986 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4987 struct kvm_enable_cap *cap) 4988 { 4989 return -EINVAL; 4990 } 4991 4992 bool kvm_are_all_memslots_empty(struct kvm *kvm) 4993 { 4994 int i; 4995 4996 lockdep_assert_held(&kvm->slots_lock); 4997 4998 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) { 4999 if (!kvm_memslots_empty(__kvm_memslots(kvm, i))) 5000 return false; 5001 } 5002 5003 return true; 5004 } 5005 
EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty); 5006 5007 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 5008 struct kvm_enable_cap *cap) 5009 { 5010 switch (cap->cap) { 5011 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 5012 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 5013 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 5014 5015 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 5016 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 5017 5018 if (cap->flags || (cap->args[0] & ~allowed_options)) 5019 return -EINVAL; 5020 kvm->manual_dirty_log_protect = cap->args[0]; 5021 return 0; 5022 } 5023 #endif 5024 case KVM_CAP_HALT_POLL: { 5025 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 5026 return -EINVAL; 5027 5028 kvm->max_halt_poll_ns = cap->args[0]; 5029 5030 /* 5031 * Ensure kvm->override_halt_poll_ns does not become visible 5032 * before kvm->max_halt_poll_ns. 5033 * 5034 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns(). 5035 */ 5036 smp_wmb(); 5037 kvm->override_halt_poll_ns = true; 5038 5039 return 0; 5040 } 5041 case KVM_CAP_DIRTY_LOG_RING: 5042 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 5043 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) 5044 return -EINVAL; 5045 5046 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 5047 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: { 5048 int r = -EINVAL; 5049 5050 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) || 5051 !kvm->dirty_ring_size || cap->flags) 5052 return r; 5053 5054 mutex_lock(&kvm->slots_lock); 5055 5056 /* 5057 * For simplicity, allow enabling ring+bitmap if and only if 5058 * there are no memslots, e.g. to ensure all memslots allocate 5059 * a bitmap after the capability is enabled. 5060 */ 5061 if (kvm_are_all_memslots_empty(kvm)) { 5062 kvm->dirty_ring_with_bitmap = true; 5063 r = 0; 5064 } 5065 5066 mutex_unlock(&kvm->slots_lock); 5067 5068 return r; 5069 } 5070 default: 5071 return kvm_vm_ioctl_enable_cap(kvm, cap); 5072 } 5073 } 5074 5075 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 5076 size_t size, loff_t *offset) 5077 { 5078 struct kvm *kvm = file->private_data; 5079 5080 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 5081 &kvm_vm_stats_desc[0], &kvm->stat, 5082 sizeof(kvm->stat), user_buffer, size, offset); 5083 } 5084 5085 static int kvm_vm_stats_release(struct inode *inode, struct file *file) 5086 { 5087 struct kvm *kvm = file->private_data; 5088 5089 kvm_put_kvm(kvm); 5090 return 0; 5091 } 5092 5093 static const struct file_operations kvm_vm_stats_fops = { 5094 .owner = THIS_MODULE, 5095 .read = kvm_vm_stats_read, 5096 .release = kvm_vm_stats_release, 5097 .llseek = noop_llseek, 5098 }; 5099 5100 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 5101 { 5102 int fd; 5103 struct file *file; 5104 5105 fd = get_unused_fd_flags(O_CLOEXEC); 5106 if (fd < 0) 5107 return fd; 5108 5109 file = anon_inode_getfile_fmode("kvm-vm-stats", 5110 &kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD); 5111 if (IS_ERR(file)) { 5112 put_unused_fd(fd); 5113 return PTR_ERR(file); 5114 } 5115 5116 kvm_get_kvm(kvm); 5117 fd_install(fd, file); 5118 5119 return fd; 5120 } 5121 5122 #define SANITY_CHECK_MEM_REGION_FIELD(field) \ 5123 do { \ 5124 BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \ 5125 offsetof(struct kvm_userspace_memory_region2, field)); \ 5126 BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \ 5127 sizeof_field(struct kvm_userspace_memory_region2, field)); \ 5128 } while (0) 5129 
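/*
 * Illustrative sketch only (userspace, not kernel code): how a minimal VMM
 * might drive the VM ioctls handled below.  The names vm_fd, guest_mem and
 * guest_mem_size are assumed to exist; vm_fd comes from KVM_CREATE_VM on
 * /dev/kvm and guest_mem from an anonymous mmap():
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = guest_mem_size,
 *		.userspace_addr  = (__u64)guest_mem,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region))
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	if (vcpu_fd < 0)
 *		err(1, "KVM_CREATE_VCPU");
 */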
5130 static long kvm_vm_ioctl(struct file *filp, 5131 unsigned int ioctl, unsigned long arg) 5132 { 5133 struct kvm *kvm = filp->private_data; 5134 void __user *argp = (void __user *)arg; 5135 int r; 5136 5137 if (kvm->mm != current->mm || kvm->vm_dead) 5138 return -EIO; 5139 switch (ioctl) { 5140 case KVM_CREATE_VCPU: 5141 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 5142 break; 5143 case KVM_ENABLE_CAP: { 5144 struct kvm_enable_cap cap; 5145 5146 r = -EFAULT; 5147 if (copy_from_user(&cap, argp, sizeof(cap))) 5148 goto out; 5149 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 5150 break; 5151 } 5152 case KVM_SET_USER_MEMORY_REGION2: 5153 case KVM_SET_USER_MEMORY_REGION: { 5154 struct kvm_userspace_memory_region2 mem; 5155 unsigned long size; 5156 5157 if (ioctl == KVM_SET_USER_MEMORY_REGION) { 5158 /* 5159 * Fields beyond struct kvm_userspace_memory_region shouldn't be 5160 * accessed, but avoid leaking kernel memory in case of a bug. 5161 */ 5162 memset(&mem, 0, sizeof(mem)); 5163 size = sizeof(struct kvm_userspace_memory_region); 5164 } else { 5165 size = sizeof(struct kvm_userspace_memory_region2); 5166 } 5167 5168 /* Ensure the common parts of the two structs are identical. */ 5169 SANITY_CHECK_MEM_REGION_FIELD(slot); 5170 SANITY_CHECK_MEM_REGION_FIELD(flags); 5171 SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr); 5172 SANITY_CHECK_MEM_REGION_FIELD(memory_size); 5173 SANITY_CHECK_MEM_REGION_FIELD(userspace_addr); 5174 5175 r = -EFAULT; 5176 if (copy_from_user(&mem, argp, size)) 5177 goto out; 5178 5179 r = -EINVAL; 5180 if (ioctl == KVM_SET_USER_MEMORY_REGION && 5181 (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS)) 5182 goto out; 5183 5184 r = kvm_vm_ioctl_set_memory_region(kvm, &mem); 5185 break; 5186 } 5187 case KVM_GET_DIRTY_LOG: { 5188 struct kvm_dirty_log log; 5189 5190 r = -EFAULT; 5191 if (copy_from_user(&log, argp, sizeof(log))) 5192 goto out; 5193 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 5194 break; 5195 } 5196 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 5197 case KVM_CLEAR_DIRTY_LOG: { 5198 struct kvm_clear_dirty_log log; 5199 5200 r = -EFAULT; 5201 if (copy_from_user(&log, argp, sizeof(log))) 5202 goto out; 5203 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 5204 break; 5205 } 5206 #endif 5207 #ifdef CONFIG_KVM_MMIO 5208 case KVM_REGISTER_COALESCED_MMIO: { 5209 struct kvm_coalesced_mmio_zone zone; 5210 5211 r = -EFAULT; 5212 if (copy_from_user(&zone, argp, sizeof(zone))) 5213 goto out; 5214 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 5215 break; 5216 } 5217 case KVM_UNREGISTER_COALESCED_MMIO: { 5218 struct kvm_coalesced_mmio_zone zone; 5219 5220 r = -EFAULT; 5221 if (copy_from_user(&zone, argp, sizeof(zone))) 5222 goto out; 5223 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 5224 break; 5225 } 5226 #endif 5227 case KVM_IRQFD: { 5228 struct kvm_irqfd data; 5229 5230 r = -EFAULT; 5231 if (copy_from_user(&data, argp, sizeof(data))) 5232 goto out; 5233 r = kvm_irqfd(kvm, &data); 5234 break; 5235 } 5236 case KVM_IOEVENTFD: { 5237 struct kvm_ioeventfd data; 5238 5239 r = -EFAULT; 5240 if (copy_from_user(&data, argp, sizeof(data))) 5241 goto out; 5242 r = kvm_ioeventfd(kvm, &data); 5243 break; 5244 } 5245 #ifdef CONFIG_HAVE_KVM_MSI 5246 case KVM_SIGNAL_MSI: { 5247 struct kvm_msi msi; 5248 5249 r = -EFAULT; 5250 if (copy_from_user(&msi, argp, sizeof(msi))) 5251 goto out; 5252 r = kvm_send_userspace_msi(kvm, &msi); 5253 break; 5254 } 5255 #endif 5256 #ifdef __KVM_HAVE_IRQ_LINE 5257 case KVM_IRQ_LINE_STATUS: 5258 case KVM_IRQ_LINE: { 5259 struct kvm_irq_level 
irq_event; 5260 5261 r = -EFAULT; 5262 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 5263 goto out; 5264 5265 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 5266 ioctl == KVM_IRQ_LINE_STATUS); 5267 if (r) 5268 goto out; 5269 5270 r = -EFAULT; 5271 if (ioctl == KVM_IRQ_LINE_STATUS) { 5272 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 5273 goto out; 5274 } 5275 5276 r = 0; 5277 break; 5278 } 5279 #endif 5280 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 5281 case KVM_SET_GSI_ROUTING: { 5282 struct kvm_irq_routing routing; 5283 struct kvm_irq_routing __user *urouting; 5284 struct kvm_irq_routing_entry *entries = NULL; 5285 5286 r = -EFAULT; 5287 if (copy_from_user(&routing, argp, sizeof(routing))) 5288 goto out; 5289 r = -EINVAL; 5290 if (!kvm_arch_can_set_irq_routing(kvm)) 5291 goto out; 5292 if (routing.nr > KVM_MAX_IRQ_ROUTES) 5293 goto out; 5294 if (routing.flags) 5295 goto out; 5296 if (routing.nr) { 5297 urouting = argp; 5298 entries = vmemdup_array_user(urouting->entries, 5299 routing.nr, sizeof(*entries)); 5300 if (IS_ERR(entries)) { 5301 r = PTR_ERR(entries); 5302 goto out; 5303 } 5304 } 5305 r = kvm_set_irq_routing(kvm, entries, routing.nr, 5306 routing.flags); 5307 kvfree(entries); 5308 break; 5309 } 5310 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 5311 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES 5312 case KVM_SET_MEMORY_ATTRIBUTES: { 5313 struct kvm_memory_attributes attrs; 5314 5315 r = -EFAULT; 5316 if (copy_from_user(&attrs, argp, sizeof(attrs))) 5317 goto out; 5318 5319 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs); 5320 break; 5321 } 5322 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */ 5323 case KVM_CREATE_DEVICE: { 5324 struct kvm_create_device cd; 5325 5326 r = -EFAULT; 5327 if (copy_from_user(&cd, argp, sizeof(cd))) 5328 goto out; 5329 5330 r = kvm_ioctl_create_device(kvm, &cd); 5331 if (r) 5332 goto out; 5333 5334 r = -EFAULT; 5335 if (copy_to_user(argp, &cd, sizeof(cd))) 5336 goto out; 5337 5338 r = 0; 5339 break; 5340 } 5341 case KVM_CHECK_EXTENSION: 5342 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 5343 break; 5344 case KVM_RESET_DIRTY_RINGS: 5345 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 5346 break; 5347 case KVM_GET_STATS_FD: 5348 r = kvm_vm_ioctl_get_stats_fd(kvm); 5349 break; 5350 #ifdef CONFIG_KVM_PRIVATE_MEM 5351 case KVM_CREATE_GUEST_MEMFD: { 5352 struct kvm_create_guest_memfd guest_memfd; 5353 5354 r = -EFAULT; 5355 if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd))) 5356 goto out; 5357 5358 r = kvm_gmem_create(kvm, &guest_memfd); 5359 break; 5360 } 5361 #endif 5362 default: 5363 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 5364 } 5365 out: 5366 return r; 5367 } 5368 5369 #ifdef CONFIG_KVM_COMPAT 5370 struct compat_kvm_dirty_log { 5371 __u32 slot; 5372 __u32 padding1; 5373 union { 5374 compat_uptr_t dirty_bitmap; /* one bit per page */ 5375 __u64 padding2; 5376 }; 5377 }; 5378 5379 struct compat_kvm_clear_dirty_log { 5380 __u32 slot; 5381 __u32 num_pages; 5382 __u64 first_page; 5383 union { 5384 compat_uptr_t dirty_bitmap; /* one bit per page */ 5385 __u64 padding2; 5386 }; 5387 }; 5388 5389 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 5390 unsigned long arg) 5391 { 5392 return -ENOTTY; 5393 } 5394 5395 static long kvm_vm_compat_ioctl(struct file *filp, 5396 unsigned int ioctl, unsigned long arg) 5397 { 5398 struct kvm *kvm = filp->private_data; 5399 int r; 5400 5401 if (kvm->mm != current->mm || kvm->vm_dead) 5402 return -EIO; 5403 5404 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg); 5405 if (r != -ENOTTY) 5406 
return r; 5407 5408 switch (ioctl) { 5409 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 5410 case KVM_CLEAR_DIRTY_LOG: { 5411 struct compat_kvm_clear_dirty_log compat_log; 5412 struct kvm_clear_dirty_log log; 5413 5414 if (copy_from_user(&compat_log, (void __user *)arg, 5415 sizeof(compat_log))) 5416 return -EFAULT; 5417 log.slot = compat_log.slot; 5418 log.num_pages = compat_log.num_pages; 5419 log.first_page = compat_log.first_page; 5420 log.padding2 = compat_log.padding2; 5421 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 5422 5423 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 5424 break; 5425 } 5426 #endif 5427 case KVM_GET_DIRTY_LOG: { 5428 struct compat_kvm_dirty_log compat_log; 5429 struct kvm_dirty_log log; 5430 5431 if (copy_from_user(&compat_log, (void __user *)arg, 5432 sizeof(compat_log))) 5433 return -EFAULT; 5434 log.slot = compat_log.slot; 5435 log.padding1 = compat_log.padding1; 5436 log.padding2 = compat_log.padding2; 5437 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 5438 5439 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 5440 break; 5441 } 5442 default: 5443 r = kvm_vm_ioctl(filp, ioctl, arg); 5444 } 5445 return r; 5446 } 5447 #endif 5448 5449 static struct file_operations kvm_vm_fops = { 5450 .release = kvm_vm_release, 5451 .unlocked_ioctl = kvm_vm_ioctl, 5452 .llseek = noop_llseek, 5453 KVM_COMPAT(kvm_vm_compat_ioctl), 5454 }; 5455 5456 bool file_is_kvm(struct file *file) 5457 { 5458 return file && file->f_op == &kvm_vm_fops; 5459 } 5460 EXPORT_SYMBOL_GPL(file_is_kvm); 5461 5462 static int kvm_dev_ioctl_create_vm(unsigned long type) 5463 { 5464 char fdname[ITOA_MAX_LEN + 1]; 5465 int r, fd; 5466 struct kvm *kvm; 5467 struct file *file; 5468 5469 fd = get_unused_fd_flags(O_CLOEXEC); 5470 if (fd < 0) 5471 return fd; 5472 5473 snprintf(fdname, sizeof(fdname), "%d", fd); 5474 5475 kvm = kvm_create_vm(type, fdname); 5476 if (IS_ERR(kvm)) { 5477 r = PTR_ERR(kvm); 5478 goto put_fd; 5479 } 5480 5481 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 5482 if (IS_ERR(file)) { 5483 r = PTR_ERR(file); 5484 goto put_kvm; 5485 } 5486 5487 /* 5488 * Don't call kvm_put_kvm anymore at this point; file->f_op is 5489 * already set, with ->release() being kvm_vm_release(). In error 5490 * cases it will be called by the final fput(file) and will take 5491 * care of doing kvm_put_kvm(kvm). 
5492 */ 5493 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 5494 5495 fd_install(fd, file); 5496 return fd; 5497 5498 put_kvm: 5499 kvm_put_kvm(kvm); 5500 put_fd: 5501 put_unused_fd(fd); 5502 return r; 5503 } 5504 5505 static long kvm_dev_ioctl(struct file *filp, 5506 unsigned int ioctl, unsigned long arg) 5507 { 5508 int r = -EINVAL; 5509 5510 switch (ioctl) { 5511 case KVM_GET_API_VERSION: 5512 if (arg) 5513 goto out; 5514 r = KVM_API_VERSION; 5515 break; 5516 case KVM_CREATE_VM: 5517 r = kvm_dev_ioctl_create_vm(arg); 5518 break; 5519 case KVM_CHECK_EXTENSION: 5520 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 5521 break; 5522 case KVM_GET_VCPU_MMAP_SIZE: 5523 if (arg) 5524 goto out; 5525 r = PAGE_SIZE; /* struct kvm_run */ 5526 #ifdef CONFIG_X86 5527 r += PAGE_SIZE; /* pio data page */ 5528 #endif 5529 #ifdef CONFIG_KVM_MMIO 5530 r += PAGE_SIZE; /* coalesced mmio ring page */ 5531 #endif 5532 break; 5533 default: 5534 return kvm_arch_dev_ioctl(filp, ioctl, arg); 5535 } 5536 out: 5537 return r; 5538 } 5539 5540 static struct file_operations kvm_chardev_ops = { 5541 .unlocked_ioctl = kvm_dev_ioctl, 5542 .llseek = noop_llseek, 5543 KVM_COMPAT(kvm_dev_ioctl), 5544 }; 5545 5546 static struct miscdevice kvm_dev = { 5547 KVM_MINOR, 5548 "kvm", 5549 &kvm_chardev_ops, 5550 }; 5551 5552 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING 5553 bool enable_virt_at_load = true; 5554 module_param(enable_virt_at_load, bool, 0444); 5555 EXPORT_SYMBOL_GPL(enable_virt_at_load); 5556 5557 __visible bool kvm_rebooting; 5558 EXPORT_SYMBOL_GPL(kvm_rebooting); 5559 5560 static DEFINE_PER_CPU(bool, virtualization_enabled); 5561 static DEFINE_MUTEX(kvm_usage_lock); 5562 static int kvm_usage_count; 5563 5564 __weak void kvm_arch_enable_virtualization(void) 5565 { 5566 5567 } 5568 5569 __weak void kvm_arch_disable_virtualization(void) 5570 { 5571 5572 } 5573 5574 static int kvm_enable_virtualization_cpu(void) 5575 { 5576 if (__this_cpu_read(virtualization_enabled)) 5577 return 0; 5578 5579 if (kvm_arch_enable_virtualization_cpu()) { 5580 pr_info("kvm: enabling virtualization on CPU%d failed\n", 5581 raw_smp_processor_id()); 5582 return -EIO; 5583 } 5584 5585 __this_cpu_write(virtualization_enabled, true); 5586 return 0; 5587 } 5588 5589 static int kvm_online_cpu(unsigned int cpu) 5590 { 5591 /* 5592 * Abort the CPU online process if hardware virtualization cannot 5593 * be enabled. Otherwise running VMs would encounter unrecoverable 5594 * errors when scheduled to this CPU. 5595 */ 5596 return kvm_enable_virtualization_cpu(); 5597 } 5598 5599 static void kvm_disable_virtualization_cpu(void *ign) 5600 { 5601 if (!__this_cpu_read(virtualization_enabled)) 5602 return; 5603 5604 kvm_arch_disable_virtualization_cpu(); 5605 5606 __this_cpu_write(virtualization_enabled, false); 5607 } 5608 5609 static int kvm_offline_cpu(unsigned int cpu) 5610 { 5611 kvm_disable_virtualization_cpu(NULL); 5612 return 0; 5613 } 5614 5615 static void kvm_shutdown(void) 5616 { 5617 /* 5618 * Disable hardware virtualization and set kvm_rebooting to indicate 5619 * that KVM has asynchronously disabled hardware virtualization, i.e. 5620 * that relevant errors and exceptions aren't entirely unexpected. 5621 * Some flavors of hardware virtualization need to be disabled before 5622 * transferring control to firmware (to perform shutdown/reboot), e.g. 5623 * on x86, virtualization can block INIT interrupts, which are used by 5624 * firmware to pull APs back under firmware control. 
Note, this path 5625 * is used for both shutdown and reboot scenarios, i.e. neither name is 5626 * 100% comprehensive. 5627 */ 5628 pr_info("kvm: exiting hardware virtualization\n"); 5629 kvm_rebooting = true; 5630 on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1); 5631 } 5632 5633 static int kvm_suspend(void) 5634 { 5635 /* 5636 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume 5637 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage 5638 * count is stable. Assert that kvm_usage_lock is not held to ensure 5639 * the system isn't suspended while KVM is enabling hardware. Hardware 5640 * enabling can be preempted, but the task cannot be frozen until it has 5641 * dropped all locks (userspace tasks are frozen via a fake signal). 5642 */ 5643 lockdep_assert_not_held(&kvm_usage_lock); 5644 lockdep_assert_irqs_disabled(); 5645 5646 kvm_disable_virtualization_cpu(NULL); 5647 return 0; 5648 } 5649 5650 static void kvm_resume(void) 5651 { 5652 lockdep_assert_not_held(&kvm_usage_lock); 5653 lockdep_assert_irqs_disabled(); 5654 5655 WARN_ON_ONCE(kvm_enable_virtualization_cpu()); 5656 } 5657 5658 static struct syscore_ops kvm_syscore_ops = { 5659 .suspend = kvm_suspend, 5660 .resume = kvm_resume, 5661 .shutdown = kvm_shutdown, 5662 }; 5663 5664 int kvm_enable_virtualization(void) 5665 { 5666 int r; 5667 5668 guard(mutex)(&kvm_usage_lock); 5669 5670 if (kvm_usage_count++) 5671 return 0; 5672 5673 kvm_arch_enable_virtualization(); 5674 5675 r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online", 5676 kvm_online_cpu, kvm_offline_cpu); 5677 if (r) 5678 goto err_cpuhp; 5679 5680 register_syscore_ops(&kvm_syscore_ops); 5681 5682 /* 5683 * Undo virtualization enabling and bail if the system is going down. 5684 * If userspace initiated a forced reboot, e.g. reboot -f, then it's 5685 * possible for an in-flight operation to enable virtualization after 5686 * syscore_shutdown() is called, i.e. without kvm_shutdown() being 5687 * invoked. Note, this relies on system_state being set _before_ 5688 * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked 5689 * or this CPU observes the impending shutdown. Which is why KVM uses 5690 * a syscore ops hook instead of registering a dedicated reboot 5691 * notifier (the latter runs before system_state is updated). 
5692 */ 5693 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF || 5694 system_state == SYSTEM_RESTART) { 5695 r = -EBUSY; 5696 goto err_rebooting; 5697 } 5698 5699 return 0; 5700 5701 err_rebooting: 5702 unregister_syscore_ops(&kvm_syscore_ops); 5703 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE); 5704 err_cpuhp: 5705 kvm_arch_disable_virtualization(); 5706 --kvm_usage_count; 5707 return r; 5708 } 5709 EXPORT_SYMBOL_GPL(kvm_enable_virtualization); 5710 5711 void kvm_disable_virtualization(void) 5712 { 5713 guard(mutex)(&kvm_usage_lock); 5714 5715 if (--kvm_usage_count) 5716 return; 5717 5718 unregister_syscore_ops(&kvm_syscore_ops); 5719 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE); 5720 kvm_arch_disable_virtualization(); 5721 } 5722 EXPORT_SYMBOL_GPL(kvm_disable_virtualization); 5723 5724 static int kvm_init_virtualization(void) 5725 { 5726 if (enable_virt_at_load) 5727 return kvm_enable_virtualization(); 5728 5729 return 0; 5730 } 5731 5732 static void kvm_uninit_virtualization(void) 5733 { 5734 if (enable_virt_at_load) 5735 kvm_disable_virtualization(); 5736 } 5737 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5738 static int kvm_init_virtualization(void) 5739 { 5740 return 0; 5741 } 5742 5743 static void kvm_uninit_virtualization(void) 5744 { 5745 5746 } 5747 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ 5748 5749 static void kvm_iodevice_destructor(struct kvm_io_device *dev) 5750 { 5751 if (dev->ops->destructor) 5752 dev->ops->destructor(dev); 5753 } 5754 5755 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 5756 { 5757 int i; 5758 5759 for (i = 0; i < bus->dev_count; i++) { 5760 struct kvm_io_device *pos = bus->range[i].dev; 5761 5762 kvm_iodevice_destructor(pos); 5763 } 5764 kfree(bus); 5765 } 5766 5767 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 5768 const struct kvm_io_range *r2) 5769 { 5770 gpa_t addr1 = r1->addr; 5771 gpa_t addr2 = r2->addr; 5772 5773 if (addr1 < addr2) 5774 return -1; 5775 5776 /* If r2->len == 0, match the exact address. If r2->len != 0, 5777 * accept any overlapping write. Any order is acceptable for 5778 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 5779 * we process all of them. 
5780 */ 5781 if (r2->len) { 5782 addr1 += r1->len; 5783 addr2 += r2->len; 5784 } 5785 5786 if (addr1 > addr2) 5787 return 1; 5788 5789 return 0; 5790 } 5791 5792 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 5793 { 5794 return kvm_io_bus_cmp(p1, p2); 5795 } 5796 5797 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 5798 gpa_t addr, int len) 5799 { 5800 struct kvm_io_range *range, key; 5801 int off; 5802 5803 key = (struct kvm_io_range) { 5804 .addr = addr, 5805 .len = len, 5806 }; 5807 5808 range = bsearch(&key, bus->range, bus->dev_count, 5809 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 5810 if (range == NULL) 5811 return -ENOENT; 5812 5813 off = range - bus->range; 5814 5815 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 5816 off--; 5817 5818 return off; 5819 } 5820 5821 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5822 struct kvm_io_range *range, const void *val) 5823 { 5824 int idx; 5825 5826 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5827 if (idx < 0) 5828 return -EOPNOTSUPP; 5829 5830 while (idx < bus->dev_count && 5831 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5832 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 5833 range->len, val)) 5834 return idx; 5835 idx++; 5836 } 5837 5838 return -EOPNOTSUPP; 5839 } 5840 5841 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5842 int len, const void *val) 5843 { 5844 struct kvm_io_bus *bus; 5845 struct kvm_io_range range; 5846 int r; 5847 5848 range = (struct kvm_io_range) { 5849 .addr = addr, 5850 .len = len, 5851 }; 5852 5853 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5854 if (!bus) 5855 return -ENOMEM; 5856 r = __kvm_io_bus_write(vcpu, bus, &range, val); 5857 return r < 0 ? r : 0; 5858 } 5859 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 5860 5861 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 5862 gpa_t addr, int len, const void *val, long cookie) 5863 { 5864 struct kvm_io_bus *bus; 5865 struct kvm_io_range range; 5866 5867 range = (struct kvm_io_range) { 5868 .addr = addr, 5869 .len = len, 5870 }; 5871 5872 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5873 if (!bus) 5874 return -ENOMEM; 5875 5876 /* First try the device referenced by cookie. */ 5877 if ((cookie >= 0) && (cookie < bus->dev_count) && 5878 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 5879 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 5880 val)) 5881 return cookie; 5882 5883 /* 5884 * cookie contained garbage; fall back to search and return the 5885 * correct cookie value. 
5886 */ 5887 return __kvm_io_bus_write(vcpu, bus, &range, val); 5888 } 5889 5890 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 5891 struct kvm_io_range *range, void *val) 5892 { 5893 int idx; 5894 5895 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 5896 if (idx < 0) 5897 return -EOPNOTSUPP; 5898 5899 while (idx < bus->dev_count && 5900 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 5901 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 5902 range->len, val)) 5903 return idx; 5904 idx++; 5905 } 5906 5907 return -EOPNOTSUPP; 5908 } 5909 5910 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5911 int len, void *val) 5912 { 5913 struct kvm_io_bus *bus; 5914 struct kvm_io_range range; 5915 int r; 5916 5917 range = (struct kvm_io_range) { 5918 .addr = addr, 5919 .len = len, 5920 }; 5921 5922 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5923 if (!bus) 5924 return -ENOMEM; 5925 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5926 return r < 0 ? r : 0; 5927 } 5928 EXPORT_SYMBOL_GPL(kvm_io_bus_read); 5929 5930 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5931 int len, struct kvm_io_device *dev) 5932 { 5933 int i; 5934 struct kvm_io_bus *new_bus, *bus; 5935 struct kvm_io_range range; 5936 5937 lockdep_assert_held(&kvm->slots_lock); 5938 5939 bus = kvm_get_bus(kvm, bus_idx); 5940 if (!bus) 5941 return -ENOMEM; 5942 5943 /* exclude ioeventfd which is limited by maximum fd */ 5944 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 5945 return -ENOSPC; 5946 5947 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 5948 GFP_KERNEL_ACCOUNT); 5949 if (!new_bus) 5950 return -ENOMEM; 5951 5952 range = (struct kvm_io_range) { 5953 .addr = addr, 5954 .len = len, 5955 .dev = dev, 5956 }; 5957 5958 for (i = 0; i < bus->dev_count; i++) 5959 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 5960 break; 5961 5962 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 5963 new_bus->dev_count++; 5964 new_bus->range[i] = range; 5965 memcpy(new_bus->range + i + 1, bus->range + i, 5966 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 5967 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 5968 synchronize_srcu_expedited(&kvm->srcu); 5969 kfree(bus); 5970 5971 return 0; 5972 } 5973 5974 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 5975 struct kvm_io_device *dev) 5976 { 5977 int i; 5978 struct kvm_io_bus *new_bus, *bus; 5979 5980 lockdep_assert_held(&kvm->slots_lock); 5981 5982 bus = kvm_get_bus(kvm, bus_idx); 5983 if (!bus) 5984 return 0; 5985 5986 for (i = 0; i < bus->dev_count; i++) { 5987 if (bus->range[i].dev == dev) { 5988 break; 5989 } 5990 } 5991 5992 if (i == bus->dev_count) 5993 return 0; 5994 5995 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 5996 GFP_KERNEL_ACCOUNT); 5997 if (new_bus) { 5998 memcpy(new_bus, bus, struct_size(bus, range, i)); 5999 new_bus->dev_count--; 6000 memcpy(new_bus->range + i, bus->range + i + 1, 6001 flex_array_size(new_bus, range, new_bus->dev_count - i)); 6002 } 6003 6004 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 6005 synchronize_srcu_expedited(&kvm->srcu); 6006 6007 /* 6008 * If NULL bus is installed, destroy the old bus, including all the 6009 * attached devices. Otherwise, destroy the caller's device only. 
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr)
{
	struct kvm_io_bus *bus;
	int dev_idx, srcu_idx;
	struct kvm_io_device *iodev = NULL;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	if (!bus)
		goto out_unlock;

	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
	if (dev_idx < 0)
		goto out_unlock;

	iodev = bus->range[dev_idx].dev;

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);

static int kvm_debugfs_open(struct inode *inode, struct file *file,
			    int (*get)(void *, u64 *), int (*set)(void *, u64),
			    const char *fmt)
{
	int ret;
	struct kvm_stat_data *stat_data = inode->i_private;

	/*
	 * The debugfs files are a reference to the kvm struct which
	 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
	 * avoids the race between open and the removal of the debugfs directory.
	 */
	if (!kvm_get_kvm_safe(stat_data->kvm))
		return -ENOENT;

	ret = simple_attr_open(inode, file, get,
			       kvm_stats_debugfs_mode(stat_data->desc) & 0222
			       ? set : NULL, fmt);
	if (ret)
		kvm_put_kvm(stat_data->kvm);

	return ret;
}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
	struct kvm_stat_data *stat_data = inode->i_private;

	simple_attr_release(inode, file);
	kvm_put_kvm(stat_data->kvm);

	return 0;
}

static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{
	*val = *(u64 *)((void *)(&kvm->stat) + offset);

	return 0;
}

static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{
	*(u64 *)((void *)(&kvm->stat) + offset) = 0;

	return 0;
}

static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	*val = 0;

	kvm_for_each_vcpu(i, vcpu, kvm)
		*val += *(u64 *)((void *)(&vcpu->stat) + offset);

	return 0;
}

static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;

	return 0;
}

static int kvm_stat_data_get(void *data, u64 *val)
{
	int r = -EFAULT;
	struct kvm_stat_data *stat_data = data;

	switch (stat_data->kind) {
	case KVM_STAT_VM:
		r = kvm_get_stat_per_vm(stat_data->kvm,
					stat_data->desc->desc.offset, val);
		break;
	case KVM_STAT_VCPU:
		r = kvm_get_stat_per_vcpu(stat_data->kvm,
					  stat_data->desc->desc.offset, val);
		break;
	}

	return r;
}

static int kvm_stat_data_clear(void *data, u64 val)
{
	int r = -EFAULT;
	struct kvm_stat_data *stat_data = data;

	if (val)
		return -EINVAL;

	switch (stat_data->kind) {
	case KVM_STAT_VM:
		r = kvm_clear_stat_per_vm(stat_data->kvm,
					  stat_data->desc->desc.offset);
		break;
	case KVM_STAT_VCPU:
		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
					    stat_data->desc->desc.offset);
		break;
	}

	return r;
}
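
/*
 * Example (illustrative, assuming the generic stats descriptors are in use):
 * the offset stored in a descriptor is the location of the counter inside the
 * stats struct, so a per-VM read above amounts to pointer arithmetic on
 * &kvm->stat.  The descriptor shown is the generic remote_tlb_flush counter.
 *
 *	// STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush) records roughly
 *	size_t offset = offsetof(struct kvm_vm_stat, generic.remote_tlb_flush);
 *	u64 val = *(u64 *)((void *)&kvm->stat + offset);
 */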
static int kvm_stat_data_open(struct inode *inode, struct file *file)
{
	__simple_attr_check_format("%llu\n", 0ull);
	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
				kvm_stat_data_clear, "%llu\n");
}

static const struct file_operations stat_fops_per_vm = {
	.owner = THIS_MODULE,
	.open = kvm_stat_data_open,
	.release = kvm_debugfs_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	u64 tmp_val;

	*val = 0;
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
		*val += tmp_val;
	}
	mutex_unlock(&kvm_lock);
	return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	if (val)
		return -EINVAL;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_clear_stat_per_vm(kvm, offset);
	}
	mutex_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	u64 tmp_val;

	*val = 0;
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
		*val += tmp_val;
	}
	mutex_unlock(&kvm_lock);
	return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	if (val)
		return -EINVAL;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_clear_stat_per_vcpu(kvm, offset);
	}
	mutex_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
			"%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");

static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
	struct kobj_uevent_env *env;
	unsigned long long created, active;

	if (!kvm_dev.this_device || !kvm)
		return;

	mutex_lock(&kvm_lock);
	if (type == KVM_EVENT_CREATE_VM) {
		kvm_createvm_count++;
		kvm_active_vms++;
	} else if (type == KVM_EVENT_DESTROY_VM) {
		kvm_active_vms--;
	}
	created = kvm_createvm_count;
	active = kvm_active_vms;
	mutex_unlock(&kvm_lock);

	env = kzalloc(sizeof(*env), GFP_KERNEL);
	if (!env)
		return;

	add_uevent_var(env, "CREATED=%llu", created);
	add_uevent_var(env, "COUNT=%llu", active);

	if (type == KVM_EVENT_CREATE_VM) {
		add_uevent_var(env, "EVENT=create");
		kvm->userspace_pid = task_pid_nr(current);
	} else if (type == KVM_EVENT_DESTROY_VM) {
		add_uevent_var(env, "EVENT=destroy");
	}
	add_uevent_var(env, "PID=%d", kvm->userspace_pid);

	if (!IS_ERR(kvm->debugfs_dentry)) {
		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);

		if (p) {
			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
			if (!IS_ERR(tmp))
				add_uevent_var(env, "STATS_PATH=%s", tmp);
			kfree(p);
		}
	}
	/* no need for checks, since we are adding at most only 5 keys */
	env->envp[env->envp_idx++] = NULL;
	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
	kfree(env);
}
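
/*
 * Example (values are made up): the KOBJ_CHANGE uevent sent above on the
 * /dev/kvm misc device carries at most five variables, e.g. as reported by
 * "udevadm monitor --property" when a VM is created:
 *
 *	CREATED=42
 *	COUNT=3
 *	EVENT=create
 *	PID=1234
 *	STATS_PATH=/kvm/1234-11
 */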
static void kvm_init_debug(void)
{
	const struct file_operations *fops;
	const struct _kvm_stats_desc *pdesc;
	int i;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		if (kvm_stats_debugfs_mode(pdesc) & 0222)
			fops = &vm_stat_fops;
		else
			fops = &vm_stat_readonly_fops;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				kvm_debugfs_dir,
				(void *)(long)pdesc->desc.offset, fops);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		if (kvm_stats_debugfs_mode(pdesc) & 0222)
			fops = &vcpu_stat_fops;
		else
			fops = &vcpu_stat_readonly_fops;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				kvm_debugfs_dir,
				(void *)(long)pdesc->desc.offset, fops);
	}
}

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->preempted, false);
	WRITE_ONCE(vcpu->ready, false);

	__this_cpu_write(kvm_running_vcpu, vcpu);
	kvm_arch_vcpu_load(vcpu, cpu);

	WRITE_ONCE(vcpu->scheduled_out, false);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->scheduled_out, true);

	if (task_is_runnable(current) && vcpu->wants_to_run) {
		WRITE_ONCE(vcpu->preempted, true);
		WRITE_ONCE(vcpu->ready, true);
	}
	kvm_arch_vcpu_put(vcpu);
	__this_cpu_write(kvm_running_vcpu, NULL);
}

/**
 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value as we update the
 * per-CPU variable in the preempt notifier handlers.
 */
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = __this_cpu_read(kvm_running_vcpu);
	preempt_enable();

	return vcpu;
}
EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
	return &kvm_running_vcpu;
}
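
/*
 * Example (illustrative sketch): code that runs in interrupt context, such as
 * an arch callback, can use kvm_get_running_vcpu() to find out whether it
 * interrupted a task that currently has a vCPU loaded.  The handler below is
 * hypothetical.
 *
 *	static void example_irq_callback(void)
 *	{
 *		struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 *
 *		if (!vcpu)
 *			return;		// not running on a vCPU thread
 *
 *		// e.g. pend a request for the vCPU to handle on its next exit
 *		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 *	}
 */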
#ifdef CONFIG_GUEST_PERF_EVENTS
static unsigned int kvm_guest_state(void)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	unsigned int state;

	if (!kvm_arch_pmi_in_guest(vcpu))
		return 0;

	state = PERF_GUEST_ACTIVE;
	if (!kvm_arch_vcpu_in_kernel(vcpu))
		state |= PERF_GUEST_USER;

	return state;
}

static unsigned long kvm_guest_get_ip(void)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
		return 0;

	return kvm_arch_vcpu_get_ip(vcpu);
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.state			= kvm_guest_state,
	.get_ip			= kvm_guest_get_ip,
	.handle_intel_pt_intr	= NULL,
};

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
{
	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
	perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
void kvm_unregister_perf_callbacks(void)
{
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
}
#endif

int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
	int r;
	int cpu;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache =
		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
					   SLAB_ACCOUNT,
					   offsetof(struct kvm_vcpu, arch),
					   offsetofend(struct kvm_vcpu, stats_id)
					   - offsetof(struct kvm_vcpu, arch),
					   NULL);
	if (!kvm_vcpu_cache)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu))) {
			r = -ENOMEM;
			goto err_cpu_kick_mask;
		}
	}

	r = kvm_irqfd_init();
	if (r)
		goto err_irqfd;

	r = kvm_async_pf_init();
	if (r)
		goto err_async_pf;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;
	kvm_device_fops.owner = module;

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	r = kvm_vfio_ops_init();
	if (WARN_ON_ONCE(r))
		goto err_vfio;

	kvm_gmem_init(module);

	r = kvm_init_virtualization();
	if (r)
		goto err_virt;

	/*
	 * Registration _must_ be the very last thing done, as this exposes
	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
	 */
	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto err_register;
	}

	return 0;

err_register:
	kvm_uninit_virtualization();
err_virt:
	kvm_vfio_ops_exit();
err_vfio:
	kvm_async_pf_deinit();
err_async_pf:
	kvm_irqfd_exit();
err_irqfd:
err_cpu_kick_mask:
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
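
/*
 * Example (illustrative sketch): architecture modules call kvm_init() from
 * their module_init() after arch setup, passing the size and alignment of
 * their vCPU container so the "kvm_vcpu" cache can hold it.  The struct and
 * function names below are hypothetical; on x86, for instance, the VMX module
 * passes its struct vcpu_vmx.
 *
 *	static int __init example_arch_init(void)
 *	{
 *		int r;
 *
 *		r = example_arch_hardware_setup();	// hypothetical
 *		if (r)
 *			return r;
 *
 *		return kvm_init(sizeof(struct vcpu_example),
 *				__alignof__(struct vcpu_example), THIS_MODULE);
 *	}
 *	module_init(example_arch_init);
 */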
void kvm_exit(void)
{
	int cpu;

	/*
	 * Note, unregistering /dev/kvm doesn't strictly need to come first;
	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
	 * to KVM while the module is being stopped.
	 */
	misc_deregister(&kvm_dev);

	kvm_uninit_virtualization();

	debugfs_remove_recursive(kvm_debugfs_dir);
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_vfio_ops_exit();
	kvm_async_pf_deinit();
	kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
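
/*
 * Example (illustrative sketch, mirroring the init example above): a matching
 * module_exit() calls kvm_exit() to unregister /dev/kvm and free the common
 * infrastructure, alongside its own arch teardown.  Names are hypothetical.
 *
 *	static void __exit example_arch_exit(void)
 *	{
 *		kvm_exit();
 *		example_arch_hardware_unsetup();	// hypothetical
 *	}
 *	module_exit(example_arch_exit);
 */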