// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "mmu_lock.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
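/*
 * A rough sketch of how the knobs above combine, assuming the grow/shrink
 * helpers later in this file (not a definitive statement of policy):
 *
 *	grow:   val *= halt_poll_ns_grow;
 *	        if (val < halt_poll_ns_grow_start)
 *	                val = halt_poll_ns_grow_start;
 *	shrink: val = halt_poll_ns_shrink ? val / halt_poll_ns_shrink : 0;
 *
 * i.e. the default shrink value of 0 resets per-vcpu polling entirely.
 */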

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!PageTransCompoundMap(page))
		return false;

	return is_transparent_hugepage(compound_head(page));
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
		    vcpu == except)
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
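/*
 * A minimal usage sketch for the request machinery above: a sender sets
 * a request bit and kicks the target vCPU(s), e.g.
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 * and each vCPU consumes the bit before its next guest entry, typically
 * in arch code:
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		...flush the TLB...
 *
 * kvm_check_request() clears the bit, so a request is handled only once.
 */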

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
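/*
 * Sketch of the intended calling pattern for the cache helpers above:
 * top up from a sleepable context, then allocate while holding the MMU
 * lock, where blocking allocations are not allowed:
 *
 *	r = kvm_mmu_topup_memory_cache(mc, min);	(may sleep)
 *	if (r)
 *		return r;
 *	KVM_MMU_LOCK(kvm);
 *	obj = kvm_mmu_memory_cache_alloc(mc);		(will not sleep)
 *	...
 *	KVM_MMU_UNLOCK(kvm);
 */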

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rcuwait_init(&vcpu->wait);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_dirty_ring_free(&vcpu->dirty_ring);
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	pte_t pte;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
};

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
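/*
 * For reference, the hva -> gfn translation used below is linear
 * arithmetic within a slot (see hva_to_gfn_memslot() in kvm_host.h):
 *
 *	gfn = slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT)
 *
 * e.g. a slot with base_gfn 0x100 backed at userspace_addr
 * 0x7f0000000000 maps hva 0x7f0000003000 to gfn 0x103 (4KiB pages).
 */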
static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	/* The on_lock() path does not yet support lock elision. */
	if (!IS_KVM_NULL_FN(range->on_lock)) {
		locked = true;
		KVM_MMU_LOCK(kvm);

		range->on_lock(kvm, range->start, range->end);

		if (IS_KVM_NULL_FN(range->handler))
			goto out_unlock;
	}

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(slot, slots) {
			unsigned long hva_start, hva_end;

			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
		kvm_flush_remote_tlbs(kvm);

out_unlock:
	if (locked)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}(),
	 * and so always runs with an elevated notifier count.  This obviates
	 * the need to bump the sequence count.
	 */
	WARN_ON_ONCE(!kvm->mmu_notifier_count);

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

static void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
				   unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns. Keep things simple and just find the minimal range
		 * which includes the current and new ranges. As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_notifier_range_start =
			min(kvm->mmu_notifier_range_start, start);
		kvm->mmu_notifier_range_end =
			max(kvm->mmu_notifier_range_end, end);
	}
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_inc_notifier_count,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

static void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
				   unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_dec_notifier_count,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	__kvm_handle_hva_range(kvm, &hva_range);

	BUG_ON(kvm->mmu_notifier_count < 0);
}
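/*
 * The count/seq pair above is consumed by the page fault path roughly as
 * follows (a sketch; see mmu_notifier_retry() in kvm_host.h):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = ...fault the page in...
 *	KVM_MMU_LOCK(kvm);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		...bail and retry, an invalidation ran concurrently...
 *	...install the new mapping...
 *	KVM_MMU_UNLOCK(kvm);
 */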
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = -1;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	slot->flags = 0;
	slot->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot);

	kvfree(slots);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}
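/*
 * Per-VM stats live under kvm_debugfs_dir in a directory named
 * "<pid>-<fd>" (see the snprintf() below), e.g. a VM created by pid 1234
 * on fd 11 shows up as /sys/kernel/debug/kvm/1234-11/ with one file per
 * VM-wide and per-vCPU stat descriptor, assuming the default debugfs
 * mount point.
 */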
static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}
	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	int r = -ENOMEM;
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

		if (!slots)
			goto out_err_no_arch_destroy_vm;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	kvm->max_halt_poll_ns = halt_poll_ns;

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}
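/*
 * The second half of the allocation above serves as a scratch buffer:
 * when manual dirty log protect is disabled, kvm_get_dirty_log_protect()
 * snapshots dirty bits into it (via kvm_second_dirty_bitmap()) before
 * copying them to userspace, so the live bitmap keeps collecting new
 * bits in the meantime.
 */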
/*
 * Delete a memslot by decrementing the number of used slots and shifting all
 * other entries in the array forward one spot.
 */
static inline void kvm_memslot_delete(struct kvm_memslots *slots,
				      struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
		return;

	slots->used_slots--;

	if (atomic_read(&slots->lru_slot) >= slots->used_slots)
		atomic_set(&slots->lru_slot, 0);

	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	mslots[i] = *memslot;
	slots->id_to_index[memslot->id] = -1;
}

/*
 * "Insert" a new memslot by incrementing the number of used slots.  Returns
 * the new slot's initial index into the memslots array.
 */
static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
{
	return slots->used_slots++;
}

/*
 * Move a changed memslot backwards in the array by shifting existing slots
 * with a higher GFN toward the front of the array.  Note, the changed memslot
 * itself is not preserved in the array, i.e. not swapped at this time, only
 * its new index into the array is tracked.  Returns the changed memslot's
 * current index into the memslots array.
 */
static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
					    struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
	    WARN_ON_ONCE(!slots->used_slots))
		return -1;

	/*
	 * Move the target memslot backward in the array by shifting existing
	 * memslots with a higher GFN (than the target memslot) towards the
	 * front of the array.
	 */
	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
		if (memslot->base_gfn > mslots[i + 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);

		/* Shift the next memslot forward one and update its index. */
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Move a changed memslot forwards in the array by shifting existing slots with
 * a lower GFN toward the back of the array.  Note, the changed memslot itself
 * is not preserved in the array, i.e. not swapped at this time, only its new
 * index into the array is tracked.  Returns the changed memslot's final index
 * into the memslots array.
 */
static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
					   struct kvm_memory_slot *memslot,
					   int start)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	for (i = start; i > 0; i--) {
		if (memslot->base_gfn < mslots[i - 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);

		/* Shift the next memslot back one and update its index. */
		mslots[i] = mslots[i - 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Re-sort memslots based on their GFN to account for an added, deleted, or
 * moved memslot.  Sorting memslots by GFN allows using a binary search during
 * memslot lookup.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!  I.e. the entry
 * at memslots[0] has the highest GFN.
 *
 * The sorting algorithm takes advantage of having initially sorted memslots
 * and knowing the position of the changed memslot.  Sorting is also optimized
 * by not swapping the updated memslot and instead only shifting other memslots
 * and tracking the new index for the updated memslot.  Only once its final
 * index is known is the updated memslot copied into its position in the array.
 *
 *  - When deleting a memslot, the deleted memslot simply needs to be moved to
 *    the end of the array.
 *
 *  - When creating a memslot, the algorithm "inserts" the new memslot at the
 *    end of the array and then shifts it forward to its correct location.
 *
 *  - When moving a memslot, the algorithm first moves the updated memslot
 *    backward to handle the scenario where the memslot's GFN was changed to a
 *    lower value.  update_memslots() then falls through and runs the same flow
 *    as creating a memslot to move the memslot forward to handle the scenario
 *    where its GFN was changed to a higher value.
 *
 * Note, slots are sorted from highest->lowest instead of lowest->highest for
 * historical reasons.  Originally, invalid memslots were denoted by having
 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
 * to the end of the array.  The current algorithm uses dedicated logic to
 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
 *
 * The other historical motivation for highest->lowest was to improve the
 * performance of memslot lookup.  KVM originally used a linear search starting
 * at memslots[0].  On x86, the largest memslot usually has one of the highest,
 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
 * single memslot above the 4gb boundary.  As the largest memslot is also the
 * most likely to be referenced, sorting it to the front of the array was
 * advantageous.  The current binary search starts from the middle of the array
 * and uses an LRU pointer to improve performance for all memslots and GFNs.
 */
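/*
 * Worked example (illustrative): with slots sorted from highest to
 * lowest GFN,
 *
 *	index:    0      1      2
 *	base_gfn: 0x800  0x400  0x100
 *
 * creating a slot with base_gfn 0x600 first "inserts" it at index 3;
 * kvm_memslot_move_forward() then shifts the 0x400 and 0x100 slots back
 * one spot each, leaving the new slot at index 1.
 */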
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *memslot,
			    enum kvm_mr_change change)
{
	int i;

	if (change == KVM_MR_DELETE) {
		kvm_memslot_delete(slots, memslot);
	} else {
		if (change == KVM_MR_CREATE)
			i = kvm_memslot_insert_back(slots);
		else
			i = kvm_memslot_move_backward(slots, memslot);
		i = kvm_memslot_move_forward(slots, memslot, i);

		/*
		 * Copy the memslot to its new position in memslots and update
		 * its index accordingly.
		 */
		slots->memslots[i] = *memslot;
		slots->id_to_index[memslot->id] = i;
	}
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
	u64 gen = old_memslots->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	rcu_assign_pointer(kvm->memslots[as_id], slots);

	/*
	 * Acquired in kvm_set_memslot. Must be released before synchronize
	 * SRCU below in order to avoid deadlock with another thread
	 * acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;

	return old_memslots;
}
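/*
 * Consumers that cache a generation (e.g. gfn -> memslot caches) can
 * detect staleness with a simple compare; a sketch, where the cache
 * structure is purely illustrative:
 *
 *	if (unlikely(slots->generation != cache->generation))
 *		...redo the memslot lookup...
 *
 * Because the in-progress flag is set while the swap is underway, a
 * generation observed mid-update can never match the final value.
 */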
static size_t kvm_memslots_size(int slots)
{
	return sizeof(struct kvm_memslots) +
	       (sizeof(struct kvm_memory_slot) * slots);
}

static void kvm_copy_memslots(struct kvm_memslots *to,
			      struct kvm_memslots *from)
{
	memcpy(to, from, kvm_memslots_size(from->used_slots));
}

/*
 * Note, at a minimum, the current number of used slots must be allocated, even
 * when deleting a memslot, as we need a complete duplicate of the memslots for
 * use when invalidating a memslot prior to deleting/moving the memslot.
 */
static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
					     enum kvm_mr_change change)
{
	struct kvm_memslots *slots;
	size_t new_size;

	if (change == KVM_MR_CREATE)
		new_size = kvm_memslots_size(old->used_slots + 1);
	else
		new_size = kvm_memslots_size(old->used_slots);

	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
	if (likely(slots))
		kvm_copy_memslots(slots, old);

	return slots;
}

static int kvm_set_memslot(struct kvm *kvm,
			   const struct kvm_userspace_memory_region *mem,
			   struct kvm_memory_slot *old,
			   struct kvm_memory_slot *new, int as_id,
			   enum kvm_mr_change change)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int r;

	/*
	 * Released in install_new_memslots.
	 *
	 * Must be held from before the current memslots are copied until
	 * after the new memslots are installed with rcu_assign_pointer,
	 * then released before the synchronize srcu in install_new_memslots.
	 *
	 * When modifying memslots outside of the slots_lock, must be held
	 * before reading the pointer to the current memslots until after all
	 * changes to those memslots are complete.
	 *
	 * These rules ensure that installing new memslots does not lose
	 * changes made to the previous memslots.
	 */
	mutex_lock(&kvm->slots_arch_lock);

	slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
	if (!slots) {
		mutex_unlock(&kvm->slots_arch_lock);
		return -ENOMEM;
	}

	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		/*
		 * Note, the INVALID flag needs to be in the appropriate entry
		 * in the freshly allocated memslots, not in @old or @new.
		 */
		slot = id_to_memslot(slots, old->id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		/*
		 * We can re-use the memory from the old memslots.
		 * It will be overwritten with a copy of the new memslots
		 * after reacquiring the slots_arch_lock below.
		 */
		slots = install_new_memslots(kvm, as_id, slots);

		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_root)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/* Released in install_new_memslots. */
		mutex_lock(&kvm->slots_arch_lock);

		/*
		 * The arch-specific fields of the memslots could have changed
		 * between releasing the slots_arch_lock in
		 * install_new_memslots and here, so get a fresh copy of the
		 * slots.
		 */
		kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
	}

	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
	if (r)
		goto out_slots;

	update_memslots(slots, new, change);
	slots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, old, new, change);

	kvfree(slots);
	return 0;

out_slots:
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		slot = id_to_memslot(slots, old->id);
		slot->flags &= ~KVM_MEMSLOT_INVALID;
		slots = install_new_memslots(kvm, as_id, slots);
	} else {
		mutex_unlock(&kvm->slots_arch_lock);
	}
	kvfree(slots);
	return r;
}

static int kvm_delete_memslot(struct kvm *kvm,
			      const struct kvm_userspace_memory_region *mem,
			      struct kvm_memory_slot *old, int as_id)
{
	struct kvm_memory_slot new;
	int r;

	if (!old->npages)
		return -EINVAL;

	memset(&new, 0, sizeof(new));
	new.id = old->id;
	/*
	 * This is only for debugging purposes; it should never be referenced
	 * for a removed memslot.
	 */
	new.as_id = as_id;

	r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
	if (r)
		return r;

	kvm_free_memslot(kvm, old);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	struct kvm_memory_slot old, new;
	struct kvm_memory_slot *tmp;
	enum kvm_mr_change change;
	int as_id, id;
	int r;

	r = check_memory_region_flags(mem);
	if (r)
		return r;

	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		return -EINVAL;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size))
		return -EINVAL;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		return -EINVAL;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		return -EINVAL;

	/*
	 * Make a full copy of the old memslot, the pointer will become stale
	 * when the memslots are re-sorted by update_memslots(), and the old
	 * memslot needs to be referenced after calling update_memslots(), e.g.
	 * to free its resources and for arch specific behavior.
	 */
	tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	if (tmp) {
		old = *tmp;
		tmp = NULL;
	} else {
		memset(&old, 0, sizeof(old));
		old.id = id;
	}

	if (!mem->memory_size)
		return kvm_delete_memslot(kvm, mem, &old, as_id);

	new.as_id = as_id;
	new.id = id;
	new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	new.npages = mem->memory_size >> PAGE_SHIFT;
	new.flags = mem->flags;
	new.userspace_addr = mem->userspace_addr;

	if (new.npages > KVM_MEM_MAX_NR_PAGES)
		return -EINVAL;

	if (!old.npages) {
		change = KVM_MR_CREATE;
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	} else { /* Modify an existing slot. */
		if ((new.userspace_addr != old.userspace_addr) ||
		    (new.npages != old.npages) ||
		    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
			return -EINVAL;

		if (new.base_gfn != old.base_gfn)
			change = KVM_MR_MOVE;
		else if (new.flags != old.flags)
			change = KVM_MR_FLAGS_ONLY;
		else /* Nothing to change. */
			return 0;

		/* Copy dirty_bitmap and arch from the current memslot. */
		new.dirty_bitmap = old.dirty_bitmap;
		memcpy(&new.arch, &old.arch, sizeof(new.arch));
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
			if (tmp->id == id)
				continue;
			if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
			      (new.base_gfn >= tmp->base_gfn + tmp->npages)))
				return -EEXIST;
		}
	}

	/* Allocate/free page dirty bitmap as needed */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;
	else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
		r = kvm_alloc_dirty_bitmap(&new);
		if (r)
			return r;

		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			bitmap_set(new.dirty_bitmap, 0, new.npages);
	}

	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
	if (r)
		goto out_bitmap;

	if (old.dirty_bitmap && !new.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&old);
	return 0;

out_bitmap:
	if (new.dirty_bitmap && !old.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&new);
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}
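/*
 * Userspace reaches the functions above via the KVM_SET_USER_MEMORY_REGION
 * ioctl on a VM fd; a minimal sketch (error handling omitted, the fd and
 * mmap()ed backing names are illustrative):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,	(bits 16-31 select the as_id)
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000000ULL,
 *		.memory_size     = 512 << 20,
 *		.userspace_addr  = (unsigned long)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * Setting memory_size to 0 deletes the slot, as implemented above.
 */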

#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log - get a snapshot of dirty pages
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	set to '1' if any dirty pages were found
 * @memslot:	set to the associated memslot, always valid on success
 */
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot)
{
	struct kvm_memslots *slots;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	/* Dirty ring tracking is exclusive to dirty log tracking */
	if (kvm->dirty_ring_size)
		return -ENXIO;

	*memslot = NULL;
	*is_dirty = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
	if (!(*memslot) || !(*memslot)->dirty_bitmap)
		return -ENOENT;

	kvm_arch_sync_dirty_log(kvm, *memslot);

	n = kvm_dirty_bitmap_bytes(*memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	/* Dirty ring tracking is exclusive to dirty log tracking */
	if (kvm->dirty_ring_size)
		return -ENXIO;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	kvm_arch_sync_dirty_log(kvm, memslot);

	n = kvm_dirty_bitmap_bytes(memslot);
	flush = false;
	if (kvm->manual_dirty_log_protect) {
		/*
		 * Unlike kvm_get_dirty_log, we always return false in *flush,
		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
		 * is some code duplication between this function and
		 * kvm_get_dirty_log, but hopefully all architectures will
		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
		 * can be eliminated.
		 */
		dirty_bitmap_buffer = dirty_bitmap;
	} else {
		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
		memset(dirty_bitmap_buffer, 0, n);

		KVM_MMU_LOCK(kvm);
		for (i = 0; i < n / sizeof(long); i++) {
			unsigned long mask;
			gfn_t offset;

			if (!dirty_bitmap[i])
				continue;

			flush = true;
			mask = xchg(&dirty_bitmap[i], 0);
			dirty_bitmap_buffer[i] = mask;

			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
		KVM_MMU_UNLOCK(kvm);
	}

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);

	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm:	kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed  and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
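/*
 * Note the granularity rules enforced below for KVM_CLEAR_DIRTY_LOG:
 * first_page must be 64-page aligned and num_pages a multiple of 64,
 * unless the range runs to the very end of the slot.  E.g. for a
 * 200-page memslot, first_page = 64 with num_pages = 136 is accepted,
 * while first_page = 32 is rejected with -EINVAL.
 */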
1932 */ 1933 if (mask) { 1934 flush = true; 1935 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, 1936 offset, mask); 1937 } 1938 } 1939 KVM_MMU_UNLOCK(kvm); 1940 1941 if (flush) 1942 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); 1943 1944 return 0; 1945 } 1946 1947 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, 1948 struct kvm_clear_dirty_log *log) 1949 { 1950 int r; 1951 1952 mutex_lock(&kvm->slots_lock); 1953 1954 r = kvm_clear_dirty_log_protect(kvm, log); 1955 1956 mutex_unlock(&kvm->slots_lock); 1957 return r; 1958 } 1959 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ 1960 1961 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) 1962 { 1963 return __gfn_to_memslot(kvm_memslots(kvm), gfn); 1964 } 1965 EXPORT_SYMBOL_GPL(gfn_to_memslot); 1966 1967 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) 1968 { 1969 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); 1970 } 1971 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); 1972 1973 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) 1974 { 1975 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); 1976 1977 return kvm_is_visible_memslot(memslot); 1978 } 1979 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); 1980 1981 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 1982 { 1983 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1984 1985 return kvm_is_visible_memslot(memslot); 1986 } 1987 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); 1988 1989 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) 1990 { 1991 struct vm_area_struct *vma; 1992 unsigned long addr, size; 1993 1994 size = PAGE_SIZE; 1995 1996 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); 1997 if (kvm_is_error_hva(addr)) 1998 return PAGE_SIZE; 1999 2000 mmap_read_lock(current->mm); 2001 vma = find_vma(current->mm, addr); 2002 if (!vma) 2003 goto out; 2004 2005 size = vma_kernel_pagesize(vma); 2006 2007 out: 2008 mmap_read_unlock(current->mm); 2009 2010 return size; 2011 } 2012 2013 static bool memslot_is_readonly(struct kvm_memory_slot *slot) 2014 { 2015 return slot->flags & KVM_MEM_READONLY; 2016 } 2017 2018 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2019 gfn_t *nr_pages, bool write) 2020 { 2021 if (!slot || slot->flags & KVM_MEMSLOT_INVALID) 2022 return KVM_HVA_ERR_BAD; 2023 2024 if (memslot_is_readonly(slot) && write) 2025 return KVM_HVA_ERR_RO_BAD; 2026 2027 if (nr_pages) 2028 *nr_pages = slot->npages - (gfn - slot->base_gfn); 2029 2030 return __gfn_to_hva_memslot(slot, gfn); 2031 } 2032 2033 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, 2034 gfn_t *nr_pages) 2035 { 2036 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 2037 } 2038 2039 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 2040 gfn_t gfn) 2041 { 2042 return gfn_to_hva_many(slot, gfn, NULL); 2043 } 2044 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 2045 2046 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 2047 { 2048 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 2049 } 2050 EXPORT_SYMBOL_GPL(gfn_to_hva); 2051 2052 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 2053 { 2054 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 2055 } 2056 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 2057 2058 /* 2059 * Return the hva of a @gfn and the R/W attribute if possible. 
2060 * 2061 * @slot: the kvm_memory_slot which contains @gfn 2062 * @gfn: the gfn to be translated 2063 * @writable: used to return the read/write attribute of the @slot if the hva 2064 * is valid and @writable is not NULL 2065 */ 2066 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 2067 gfn_t gfn, bool *writable) 2068 { 2069 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 2070 2071 if (!kvm_is_error_hva(hva) && writable) 2072 *writable = !memslot_is_readonly(slot); 2073 2074 return hva; 2075 } 2076 2077 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 2078 { 2079 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2080 2081 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2082 } 2083 2084 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 2085 { 2086 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2087 2088 return gfn_to_hva_memslot_prot(slot, gfn, writable); 2089 } 2090 2091 static inline int check_user_page_hwpoison(unsigned long addr) 2092 { 2093 int rc, flags = FOLL_HWPOISON | FOLL_WRITE; 2094 2095 rc = get_user_pages(addr, 1, flags, NULL, NULL); 2096 return rc == -EHWPOISON; 2097 } 2098 2099 /* 2100 * The fast path to get the writable pfn which will be stored in @pfn, 2101 * true indicates success, otherwise false is returned. It's also the 2102 * only part that runs if we can in atomic context. 2103 */ 2104 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, 2105 bool *writable, kvm_pfn_t *pfn) 2106 { 2107 struct page *page[1]; 2108 2109 /* 2110 * Fast pin a writable pfn only if it is a write fault request 2111 * or the caller allows to map a writable pfn for a read fault 2112 * request. 2113 */ 2114 if (!(write_fault || writable)) 2115 return false; 2116 2117 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { 2118 *pfn = page_to_pfn(page[0]); 2119 2120 if (writable) 2121 *writable = true; 2122 return true; 2123 } 2124 2125 return false; 2126 } 2127 2128 /* 2129 * The slow path to get the pfn of the specified host virtual address, 2130 * 1 indicates success, -errno is returned if error is detected. 
2131 */ 2132 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, 2133 bool *writable, kvm_pfn_t *pfn) 2134 { 2135 unsigned int flags = FOLL_HWPOISON; 2136 struct page *page; 2137 int npages = 0; 2138 2139 might_sleep(); 2140 2141 if (writable) 2142 *writable = write_fault; 2143 2144 if (write_fault) 2145 flags |= FOLL_WRITE; 2146 if (async) 2147 flags |= FOLL_NOWAIT; 2148 2149 npages = get_user_pages_unlocked(addr, 1, &page, flags); 2150 if (npages != 1) 2151 return npages; 2152 2153 /* map read fault as writable if possible */ 2154 if (unlikely(!write_fault) && writable) { 2155 struct page *wpage; 2156 2157 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { 2158 *writable = true; 2159 put_page(page); 2160 page = wpage; 2161 } 2162 } 2163 *pfn = page_to_pfn(page); 2164 return npages; 2165 } 2166 2167 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault) 2168 { 2169 if (unlikely(!(vma->vm_flags & VM_READ))) 2170 return false; 2171 2172 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE)))) 2173 return false; 2174 2175 return true; 2176 } 2177 2178 static int kvm_try_get_pfn(kvm_pfn_t pfn) 2179 { 2180 if (kvm_is_reserved_pfn(pfn)) 2181 return 1; 2182 return get_page_unless_zero(pfn_to_page(pfn)); 2183 } 2184 2185 static int hva_to_pfn_remapped(struct vm_area_struct *vma, 2186 unsigned long addr, bool *async, 2187 bool write_fault, bool *writable, 2188 kvm_pfn_t *p_pfn) 2189 { 2190 kvm_pfn_t pfn; 2191 pte_t *ptep; 2192 spinlock_t *ptl; 2193 int r; 2194 2195 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2196 if (r) { 2197 /* 2198 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does 2199 * not call the fault handler, so do it here. 2200 */ 2201 bool unlocked = false; 2202 r = fixup_user_fault(current->mm, addr, 2203 (write_fault ? FAULT_FLAG_WRITE : 0), 2204 &unlocked); 2205 if (unlocked) 2206 return -EAGAIN; 2207 if (r) 2208 return r; 2209 2210 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); 2211 if (r) 2212 return r; 2213 } 2214 2215 if (write_fault && !pte_write(*ptep)) { 2216 pfn = KVM_PFN_ERR_RO_FAULT; 2217 goto out; 2218 } 2219 2220 if (writable) 2221 *writable = pte_write(*ptep); 2222 pfn = pte_pfn(*ptep); 2223 2224 /* 2225 * Get a reference here because callers of *hva_to_pfn* and 2226 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the 2227 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP 2228 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will 2229 * simply do nothing for reserved pfns. 2230 * 2231 * Whoever called remap_pfn_range is also going to call e.g. 2232 * unmap_mapping_range before the underlying pages are freed, 2233 * causing a call to our MMU notifier. 2234 * 2235 * Certain IO or PFNMAP mappings can be backed with valid 2236 * struct pages, but be allocated without refcounting e.g., 2237 * tail pages of non-compound higher order allocations, which 2238 * would then underflow the refcount when the caller does the 2239 * required put_page. Don't allow those pages here. 2240 */ 2241 if (!kvm_try_get_pfn(pfn)) 2242 r = -EFAULT; 2243 2244 out: 2245 pte_unmap_unlock(ptep, ptl); 2246 *p_pfn = pfn; 2247 2248 return r; 2249 } 2250 2251 /* 2252 * Pin guest page in memory and return its pfn. 
2253 * @addr: host virtual address which maps memory to the guest
2254 * @atomic: whether this function must not sleep (only the fast path is tried)
2255 * @async: whether this function needs to wait for I/O to complete if the
2256 * host page is not in memory
2257 * @write_fault: whether we should get a writable host page
2258 * @writable: whether to allow mapping a writable host page for !@write_fault
2259 *
2260 * The function will map a writable host page for these two cases:
2261 * 1): @write_fault = true
2262 * 2): @write_fault = false && @writable; @writable will tell the caller
2263 * whether the mapping is writable.
2264 */
2265 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2266 			    bool write_fault, bool *writable)
2267 {
2268 	struct vm_area_struct *vma;
2269 	kvm_pfn_t pfn = 0;
2270 	int npages, r;
2271 
2272 	/* we can do it either atomically or asynchronously, not both */
2273 	BUG_ON(atomic && async);
2274 
2275 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2276 		return pfn;
2277 
2278 	if (atomic)
2279 		return KVM_PFN_ERR_FAULT;
2280 
2281 	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2282 	if (npages == 1)
2283 		return pfn;
2284 
2285 	mmap_read_lock(current->mm);
2286 	if (npages == -EHWPOISON ||
2287 	    (!async && check_user_page_hwpoison(addr))) {
2288 		pfn = KVM_PFN_ERR_HWPOISON;
2289 		goto exit;
2290 	}
2291 
2292 retry:
2293 	vma = vma_lookup(current->mm, addr);
2294 
2295 	if (vma == NULL)
2296 		pfn = KVM_PFN_ERR_FAULT;
2297 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2298 		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
2299 		if (r == -EAGAIN)
2300 			goto retry;
2301 		if (r < 0)
2302 			pfn = KVM_PFN_ERR_FAULT;
2303 	} else {
2304 		if (async && vma_is_valid(vma, write_fault))
2305 			*async = true;
2306 		pfn = KVM_PFN_ERR_FAULT;
2307 	}
2308 exit:
2309 	mmap_read_unlock(current->mm);
2310 	return pfn;
2311 }
2312 
2313 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
2314 			       bool atomic, bool *async, bool write_fault,
2315 			       bool *writable, hva_t *hva)
2316 {
2317 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2318 
2319 	if (hva)
2320 		*hva = addr;
2321 
2322 	if (addr == KVM_HVA_ERR_RO_BAD) {
2323 		if (writable)
2324 			*writable = false;
2325 		return KVM_PFN_ERR_RO_FAULT;
2326 	}
2327 
2328 	if (kvm_is_error_hva(addr)) {
2329 		if (writable)
2330 			*writable = false;
2331 		return KVM_PFN_NOSLOT;
2332 	}
2333 
2334 	/* Do not map writable pfn in the readonly memslot.
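	 * If the caller only asked whether the mapping is writable (writable
	 * != NULL with !write_fault), report the slot as read-only and clear
	 * the pointer so that hva_to_pfn() below will not try to upgrade the
	 * read fault to a writable mapping.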
*/ 2335 if (writable && memslot_is_readonly(slot)) { 2336 *writable = false; 2337 writable = NULL; 2338 } 2339 2340 return hva_to_pfn(addr, atomic, async, write_fault, 2341 writable); 2342 } 2343 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot); 2344 2345 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 2346 bool *writable) 2347 { 2348 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, 2349 write_fault, writable, NULL); 2350 } 2351 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); 2352 2353 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) 2354 { 2355 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); 2356 } 2357 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); 2358 2359 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) 2360 { 2361 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); 2362 } 2363 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic); 2364 2365 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) 2366 { 2367 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2368 } 2369 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic); 2370 2371 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 2372 { 2373 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); 2374 } 2375 EXPORT_SYMBOL_GPL(gfn_to_pfn); 2376 2377 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 2378 { 2379 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); 2380 } 2381 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn); 2382 2383 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2384 struct page **pages, int nr_pages) 2385 { 2386 unsigned long addr; 2387 gfn_t entry = 0; 2388 2389 addr = gfn_to_hva_many(slot, gfn, &entry); 2390 if (kvm_is_error_hva(addr)) 2391 return -1; 2392 2393 if (entry < nr_pages) 2394 return 0; 2395 2396 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); 2397 } 2398 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 2399 2400 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn) 2401 { 2402 if (is_error_noslot_pfn(pfn)) 2403 return KVM_ERR_PTR_BAD_PAGE; 2404 2405 if (kvm_is_reserved_pfn(pfn)) { 2406 WARN_ON(1); 2407 return KVM_ERR_PTR_BAD_PAGE; 2408 } 2409 2410 return pfn_to_page(pfn); 2411 } 2412 2413 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 2414 { 2415 kvm_pfn_t pfn; 2416 2417 pfn = gfn_to_pfn(kvm, gfn); 2418 2419 return kvm_pfn_to_page(pfn); 2420 } 2421 EXPORT_SYMBOL_GPL(gfn_to_page); 2422 2423 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) 2424 { 2425 if (pfn == 0) 2426 return; 2427 2428 if (cache) 2429 cache->pfn = cache->gfn = 0; 2430 2431 if (dirty) 2432 kvm_release_pfn_dirty(pfn); 2433 else 2434 kvm_release_pfn_clean(pfn); 2435 } 2436 2437 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, 2438 struct gfn_to_pfn_cache *cache, u64 gen) 2439 { 2440 kvm_release_pfn(cache->pfn, cache->dirty, cache); 2441 2442 cache->pfn = gfn_to_pfn_memslot(slot, gfn); 2443 cache->gfn = gfn; 2444 cache->dirty = false; 2445 cache->generation = gen; 2446 } 2447 2448 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, 2449 struct kvm_host_map *map, 2450 struct gfn_to_pfn_cache *cache, 2451 bool atomic) 2452 { 2453 kvm_pfn_t pfn; 2454 void *hva = NULL; 2455 struct page *page = KVM_UNMAPPED_PAGE; 2456 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); 2457 u64 gen = slots->generation; 2458 2459 if (!map) 2460 return -EINVAL; 2461 2462 if (cache) { 2463 if 
(!cache->pfn || cache->gfn != gfn || 2464 cache->generation != gen) { 2465 if (atomic) 2466 return -EAGAIN; 2467 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); 2468 } 2469 pfn = cache->pfn; 2470 } else { 2471 if (atomic) 2472 return -EAGAIN; 2473 pfn = gfn_to_pfn_memslot(slot, gfn); 2474 } 2475 if (is_error_noslot_pfn(pfn)) 2476 return -EINVAL; 2477 2478 if (pfn_valid(pfn)) { 2479 page = pfn_to_page(pfn); 2480 if (atomic) 2481 hva = kmap_atomic(page); 2482 else 2483 hva = kmap(page); 2484 #ifdef CONFIG_HAS_IOMEM 2485 } else if (!atomic) { 2486 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); 2487 } else { 2488 return -EINVAL; 2489 #endif 2490 } 2491 2492 if (!hva) 2493 return -EFAULT; 2494 2495 map->page = page; 2496 map->hva = hva; 2497 map->pfn = pfn; 2498 map->gfn = gfn; 2499 2500 return 0; 2501 } 2502 2503 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, 2504 struct gfn_to_pfn_cache *cache, bool atomic) 2505 { 2506 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, 2507 cache, atomic); 2508 } 2509 EXPORT_SYMBOL_GPL(kvm_map_gfn); 2510 2511 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) 2512 { 2513 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, 2514 NULL, false); 2515 } 2516 EXPORT_SYMBOL_GPL(kvm_vcpu_map); 2517 2518 static void __kvm_unmap_gfn(struct kvm *kvm, 2519 struct kvm_memory_slot *memslot, 2520 struct kvm_host_map *map, 2521 struct gfn_to_pfn_cache *cache, 2522 bool dirty, bool atomic) 2523 { 2524 if (!map) 2525 return; 2526 2527 if (!map->hva) 2528 return; 2529 2530 if (map->page != KVM_UNMAPPED_PAGE) { 2531 if (atomic) 2532 kunmap_atomic(map->hva); 2533 else 2534 kunmap(map->page); 2535 } 2536 #ifdef CONFIG_HAS_IOMEM 2537 else if (!atomic) 2538 memunmap(map->hva); 2539 else 2540 WARN_ONCE(1, "Unexpected unmapping in atomic context"); 2541 #endif 2542 2543 if (dirty) 2544 mark_page_dirty_in_slot(kvm, memslot, map->gfn); 2545 2546 if (cache) 2547 cache->dirty |= dirty; 2548 else 2549 kvm_release_pfn(map->pfn, dirty, NULL); 2550 2551 map->hva = NULL; 2552 map->page = NULL; 2553 } 2554 2555 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, 2556 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) 2557 { 2558 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, 2559 cache, dirty, atomic); 2560 return 0; 2561 } 2562 EXPORT_SYMBOL_GPL(kvm_unmap_gfn); 2563 2564 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) 2565 { 2566 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), 2567 map, NULL, dirty, false); 2568 } 2569 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); 2570 2571 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 2572 { 2573 kvm_pfn_t pfn; 2574 2575 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 2576 2577 return kvm_pfn_to_page(pfn); 2578 } 2579 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 2580 2581 void kvm_release_page_clean(struct page *page) 2582 { 2583 WARN_ON(is_error_page(page)); 2584 2585 kvm_release_pfn_clean(page_to_pfn(page)); 2586 } 2587 EXPORT_SYMBOL_GPL(kvm_release_page_clean); 2588 2589 void kvm_release_pfn_clean(kvm_pfn_t pfn) 2590 { 2591 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 2592 put_page(pfn_to_page(pfn)); 2593 } 2594 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 2595 2596 void kvm_release_page_dirty(struct page *page) 2597 { 2598 WARN_ON(is_error_page(page)); 2599 2600 kvm_release_pfn_dirty(page_to_pfn(page)); 2601 } 2602 EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 2603 2604 void 
kvm_release_pfn_dirty(kvm_pfn_t pfn) 2605 { 2606 kvm_set_pfn_dirty(pfn); 2607 kvm_release_pfn_clean(pfn); 2608 } 2609 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); 2610 2611 void kvm_set_pfn_dirty(kvm_pfn_t pfn) 2612 { 2613 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2614 SetPageDirty(pfn_to_page(pfn)); 2615 } 2616 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 2617 2618 void kvm_set_pfn_accessed(kvm_pfn_t pfn) 2619 { 2620 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) 2621 mark_page_accessed(pfn_to_page(pfn)); 2622 } 2623 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 2624 2625 void kvm_get_pfn(kvm_pfn_t pfn) 2626 { 2627 if (!kvm_is_reserved_pfn(pfn)) 2628 get_page(pfn_to_page(pfn)); 2629 } 2630 EXPORT_SYMBOL_GPL(kvm_get_pfn); 2631 2632 static int next_segment(unsigned long len, int offset) 2633 { 2634 if (len > PAGE_SIZE - offset) 2635 return PAGE_SIZE - offset; 2636 else 2637 return len; 2638 } 2639 2640 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 2641 void *data, int offset, int len) 2642 { 2643 int r; 2644 unsigned long addr; 2645 2646 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2647 if (kvm_is_error_hva(addr)) 2648 return -EFAULT; 2649 r = __copy_from_user(data, (void __user *)addr + offset, len); 2650 if (r) 2651 return -EFAULT; 2652 return 0; 2653 } 2654 2655 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 2656 int len) 2657 { 2658 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2659 2660 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2661 } 2662 EXPORT_SYMBOL_GPL(kvm_read_guest_page); 2663 2664 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 2665 int offset, int len) 2666 { 2667 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2668 2669 return __kvm_read_guest_page(slot, gfn, data, offset, len); 2670 } 2671 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 2672 2673 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 2674 { 2675 gfn_t gfn = gpa >> PAGE_SHIFT; 2676 int seg; 2677 int offset = offset_in_page(gpa); 2678 int ret; 2679 2680 while ((seg = next_segment(len, offset)) != 0) { 2681 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 2682 if (ret < 0) 2683 return ret; 2684 offset = 0; 2685 len -= seg; 2686 data += seg; 2687 ++gfn; 2688 } 2689 return 0; 2690 } 2691 EXPORT_SYMBOL_GPL(kvm_read_guest); 2692 2693 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 2694 { 2695 gfn_t gfn = gpa >> PAGE_SHIFT; 2696 int seg; 2697 int offset = offset_in_page(gpa); 2698 int ret; 2699 2700 while ((seg = next_segment(len, offset)) != 0) { 2701 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 2702 if (ret < 0) 2703 return ret; 2704 offset = 0; 2705 len -= seg; 2706 data += seg; 2707 ++gfn; 2708 } 2709 return 0; 2710 } 2711 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 2712 2713 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 2714 void *data, int offset, unsigned long len) 2715 { 2716 int r; 2717 unsigned long addr; 2718 2719 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 2720 if (kvm_is_error_hva(addr)) 2721 return -EFAULT; 2722 pagefault_disable(); 2723 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 2724 pagefault_enable(); 2725 if (r) 2726 return -EFAULT; 2727 return 0; 2728 } 2729 2730 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 2731 void *data, unsigned long len) 2732 { 2733 gfn_t gfn = gpa >> 
PAGE_SHIFT; 2734 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2735 int offset = offset_in_page(gpa); 2736 2737 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 2738 } 2739 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 2740 2741 static int __kvm_write_guest_page(struct kvm *kvm, 2742 struct kvm_memory_slot *memslot, gfn_t gfn, 2743 const void *data, int offset, int len) 2744 { 2745 int r; 2746 unsigned long addr; 2747 2748 addr = gfn_to_hva_memslot(memslot, gfn); 2749 if (kvm_is_error_hva(addr)) 2750 return -EFAULT; 2751 r = __copy_to_user((void __user *)addr + offset, data, len); 2752 if (r) 2753 return -EFAULT; 2754 mark_page_dirty_in_slot(kvm, memslot, gfn); 2755 return 0; 2756 } 2757 2758 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 2759 const void *data, int offset, int len) 2760 { 2761 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 2762 2763 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); 2764 } 2765 EXPORT_SYMBOL_GPL(kvm_write_guest_page); 2766 2767 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 2768 const void *data, int offset, int len) 2769 { 2770 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 2771 2772 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); 2773 } 2774 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 2775 2776 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 2777 unsigned long len) 2778 { 2779 gfn_t gfn = gpa >> PAGE_SHIFT; 2780 int seg; 2781 int offset = offset_in_page(gpa); 2782 int ret; 2783 2784 while ((seg = next_segment(len, offset)) != 0) { 2785 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 2786 if (ret < 0) 2787 return ret; 2788 offset = 0; 2789 len -= seg; 2790 data += seg; 2791 ++gfn; 2792 } 2793 return 0; 2794 } 2795 EXPORT_SYMBOL_GPL(kvm_write_guest); 2796 2797 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 2798 unsigned long len) 2799 { 2800 gfn_t gfn = gpa >> PAGE_SHIFT; 2801 int seg; 2802 int offset = offset_in_page(gpa); 2803 int ret; 2804 2805 while ((seg = next_segment(len, offset)) != 0) { 2806 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 2807 if (ret < 0) 2808 return ret; 2809 offset = 0; 2810 len -= seg; 2811 data += seg; 2812 ++gfn; 2813 } 2814 return 0; 2815 } 2816 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 2817 2818 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, 2819 struct gfn_to_hva_cache *ghc, 2820 gpa_t gpa, unsigned long len) 2821 { 2822 int offset = offset_in_page(gpa); 2823 gfn_t start_gfn = gpa >> PAGE_SHIFT; 2824 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 2825 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 2826 gfn_t nr_pages_avail; 2827 2828 /* Update ghc->generation before performing any error checks. */ 2829 ghc->generation = slots->generation; 2830 2831 if (start_gfn > end_gfn) { 2832 ghc->hva = KVM_HVA_ERR_BAD; 2833 return -EINVAL; 2834 } 2835 2836 /* 2837 * If the requested region crosses two memslots, we still 2838 * verify that the entire region is valid here. 2839 */ 2840 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { 2841 ghc->memslot = __gfn_to_memslot(slots, start_gfn); 2842 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 2843 &nr_pages_avail); 2844 if (kvm_is_error_hva(ghc->hva)) 2845 return -EFAULT; 2846 } 2847 2848 /* Use the slow path for cross page reads and writes. 
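	 * In other words, a request spanning more than one page leaves
	 * ghc->memslot set to NULL, which steers kvm_read_guest_offset_cached()
	 * and kvm_write_guest_offset_cached() below into the uncached
	 * kvm_read_guest()/kvm_write_guest() helpers.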
*/
2849 	if (nr_pages_needed == 1)
2850 		ghc->hva += offset;
2851 	else
2852 		ghc->memslot = NULL;
2853 
2854 	ghc->gpa = gpa;
2855 	ghc->len = len;
2856 	return 0;
2857 }
2858 
2859 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2860 			      gpa_t gpa, unsigned long len)
2861 {
2862 	struct kvm_memslots *slots = kvm_memslots(kvm);
2863 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2864 }
2865 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
2866 
2867 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2868 				  void *data, unsigned int offset,
2869 				  unsigned long len)
2870 {
2871 	struct kvm_memslots *slots = kvm_memslots(kvm);
2872 	int r;
2873 	gpa_t gpa = ghc->gpa + offset;
2874 
2875 	BUG_ON(len + offset > ghc->len);
2876 
2877 	if (slots->generation != ghc->generation) {
2878 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2879 			return -EFAULT;
2880 	}
2881 
2882 	if (kvm_is_error_hva(ghc->hva))
2883 		return -EFAULT;
2884 
2885 	if (unlikely(!ghc->memslot))
2886 		return kvm_write_guest(kvm, gpa, data, len);
2887 
2888 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2889 	if (r)
2890 		return -EFAULT;
2891 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
2892 
2893 	return 0;
2894 }
2895 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
2896 
2897 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2898 			   void *data, unsigned long len)
2899 {
2900 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
2901 }
2902 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
2903 
2904 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2905 				 void *data, unsigned int offset,
2906 				 unsigned long len)
2907 {
2908 	struct kvm_memslots *slots = kvm_memslots(kvm);
2909 	int r;
2910 	gpa_t gpa = ghc->gpa + offset;
2911 
2912 	BUG_ON(len + offset > ghc->len);
2913 
2914 	if (slots->generation != ghc->generation) {
2915 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2916 			return -EFAULT;
2917 	}
2918 
2919 	if (kvm_is_error_hva(ghc->hva))
2920 		return -EFAULT;
2921 
2922 	if (unlikely(!ghc->memslot))
2923 		return kvm_read_guest(kvm, gpa, data, len);
2924 
2925 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
2926 	if (r)
2927 		return -EFAULT;
2928 
2929 	return 0;
2930 }
2931 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
2932 
2933 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2934 			  void *data, unsigned long len)
2935 {
2936 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
2937 }
2938 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
2939 
2940 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2941 {
2942 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2943 	gfn_t gfn = gpa >> PAGE_SHIFT;
2944 	int seg;
2945 	int offset = offset_in_page(gpa);
2946 	int ret;
2947 
2948 	while ((seg = next_segment(len, offset)) != 0) {
2949 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
2950 		if (ret < 0)
2951 			return ret;
2952 		offset = 0;
2953 		len -= seg;
2954 		++gfn;
2955 	}
2956 	return 0;
2957 }
2958 EXPORT_SYMBOL_GPL(kvm_clear_guest);
2959 
2960 void mark_page_dirty_in_slot(struct kvm *kvm,
2961 			     struct kvm_memory_slot *memslot,
2962 			     gfn_t gfn)
2963 {
2964 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
2965 		unsigned long rel_gfn = gfn - memslot->base_gfn;
2966 		u32 slot = (memslot->as_id << 16) | memslot->id;
2967 
2968 		if (kvm->dirty_ring_size)
2969 			kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
2970 					    slot,
rel_gfn);
2971 		else
2972 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
2973 	}
2974 }
2975 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
2976 
2977 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2978 {
2979 	struct kvm_memory_slot *memslot;
2980 
2981 	memslot = gfn_to_memslot(kvm, gfn);
2982 	mark_page_dirty_in_slot(kvm, memslot, gfn);
2983 }
2984 EXPORT_SYMBOL_GPL(mark_page_dirty);
2985 
2986 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2987 {
2988 	struct kvm_memory_slot *memslot;
2989 
2990 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2991 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
2992 }
2993 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
2994 
2995 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
2996 {
2997 	if (!vcpu->sigset_active)
2998 		return;
2999 
3000 	/*
3001 	 * This does a lockless modification of ->real_blocked, which is fine
3002 	 * because only current can change ->real_blocked, and all readers of
3003 	 * ->real_blocked don't care as long as ->real_blocked is always a
3004 	 * subset of ->blocked.
3005 	 */
3006 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3007 }
3008 
3009 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3010 {
3011 	if (!vcpu->sigset_active)
3012 		return;
3013 
3014 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3015 	sigemptyset(&current->real_blocked);
3016 }
3017 
3018 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3019 {
3020 	unsigned int old, val, grow, grow_start;
3021 
3022 	old = val = vcpu->halt_poll_ns;
3023 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3024 	grow = READ_ONCE(halt_poll_ns_grow);
3025 	if (!grow)
3026 		goto out;
3027 
3028 	val *= grow;
3029 	if (val < grow_start)
3030 		val = grow_start;
3031 
3032 	if (val > vcpu->kvm->max_halt_poll_ns)
3033 		val = vcpu->kvm->max_halt_poll_ns;
3034 
3035 	vcpu->halt_poll_ns = val;
3036 out:
3037 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3038 }
3039 
3040 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3041 {
3042 	unsigned int old, val, shrink;
3043 
3044 	old = val = vcpu->halt_poll_ns;
3045 	shrink = READ_ONCE(halt_poll_ns_shrink);
3046 	if (shrink == 0)
3047 		val = 0;
3048 	else
3049 		val /= shrink;
3050 
3051 	vcpu->halt_poll_ns = val;
3052 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3053 }
3054 
3055 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3056 {
3057 	int ret = -EINTR;
3058 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3059 
3060 	if (kvm_arch_vcpu_runnable(vcpu)) {
3061 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
3062 		goto out;
3063 	}
3064 	if (kvm_cpu_has_pending_timer(vcpu))
3065 		goto out;
3066 	if (signal_pending(current))
3067 		goto out;
3068 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3069 		goto out;
3070 
3071 	ret = 0;
3072 out:
3073 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3074 	return ret;
3075 }
3076 
3077 static inline void
3078 update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
3079 {
3080 	if (waited)
3081 		vcpu->stat.generic.halt_poll_fail_ns += poll_ns;
3082 	else
3083 		vcpu->stat.generic.halt_poll_success_ns += poll_ns;
3084 }
3085 
3086 /*
3087  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
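 * Roughly three phases follow: busy-poll for up to vcpu->halt_poll_ns so
 * that short halts avoid a context switch, then sleep on the vCPU's
 * rcuwait until kvm_vcpu_check_block() reports a wakeup condition, and
 * finally grow or shrink the polling window based on how long the block
 * actually lasted.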
3088 */ 3089 void kvm_vcpu_block(struct kvm_vcpu *vcpu) 3090 { 3091 ktime_t start, cur, poll_end; 3092 bool waited = false; 3093 u64 block_ns; 3094 3095 kvm_arch_vcpu_blocking(vcpu); 3096 3097 start = cur = poll_end = ktime_get(); 3098 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { 3099 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 3100 3101 ++vcpu->stat.generic.halt_attempted_poll; 3102 do { 3103 /* 3104 * This sets KVM_REQ_UNHALT if an interrupt 3105 * arrives. 3106 */ 3107 if (kvm_vcpu_check_block(vcpu) < 0) { 3108 ++vcpu->stat.generic.halt_successful_poll; 3109 if (!vcpu_valid_wakeup(vcpu)) 3110 ++vcpu->stat.generic.halt_poll_invalid; 3111 goto out; 3112 } 3113 poll_end = cur = ktime_get(); 3114 } while (kvm_vcpu_can_poll(cur, stop)); 3115 } 3116 3117 prepare_to_rcuwait(&vcpu->wait); 3118 for (;;) { 3119 set_current_state(TASK_INTERRUPTIBLE); 3120 3121 if (kvm_vcpu_check_block(vcpu) < 0) 3122 break; 3123 3124 waited = true; 3125 schedule(); 3126 } 3127 finish_rcuwait(&vcpu->wait); 3128 cur = ktime_get(); 3129 out: 3130 kvm_arch_vcpu_unblocking(vcpu); 3131 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 3132 3133 update_halt_poll_stats( 3134 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); 3135 3136 if (!kvm_arch_no_poll(vcpu)) { 3137 if (!vcpu_valid_wakeup(vcpu)) { 3138 shrink_halt_poll_ns(vcpu); 3139 } else if (vcpu->kvm->max_halt_poll_ns) { 3140 if (block_ns <= vcpu->halt_poll_ns) 3141 ; 3142 /* we had a long block, shrink polling */ 3143 else if (vcpu->halt_poll_ns && 3144 block_ns > vcpu->kvm->max_halt_poll_ns) 3145 shrink_halt_poll_ns(vcpu); 3146 /* we had a short halt and our poll time is too small */ 3147 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && 3148 block_ns < vcpu->kvm->max_halt_poll_ns) 3149 grow_halt_poll_ns(vcpu); 3150 } else { 3151 vcpu->halt_poll_ns = 0; 3152 } 3153 } 3154 3155 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); 3156 kvm_arch_vcpu_block_finish(vcpu); 3157 } 3158 EXPORT_SYMBOL_GPL(kvm_vcpu_block); 3159 3160 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) 3161 { 3162 struct rcuwait *waitp; 3163 3164 waitp = kvm_arch_vcpu_get_wait(vcpu); 3165 if (rcuwait_wake_up(waitp)) { 3166 WRITE_ONCE(vcpu->ready, true); 3167 ++vcpu->stat.generic.halt_wakeup; 3168 return true; 3169 } 3170 3171 return false; 3172 } 3173 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up); 3174 3175 #ifndef CONFIG_S390 3176 /* 3177 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 3178 */ 3179 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3180 { 3181 int me; 3182 int cpu = vcpu->cpu; 3183 3184 if (kvm_vcpu_wake_up(vcpu)) 3185 return; 3186 3187 me = get_cpu(); 3188 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3189 if (kvm_arch_vcpu_should_kick(vcpu)) 3190 smp_send_reschedule(cpu); 3191 put_cpu(); 3192 } 3193 EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 3194 #endif /* !CONFIG_S390 */ 3195 3196 int kvm_vcpu_yield_to(struct kvm_vcpu *target) 3197 { 3198 struct pid *pid; 3199 struct task_struct *task = NULL; 3200 int ret = 0; 3201 3202 rcu_read_lock(); 3203 pid = rcu_dereference(target->pid); 3204 if (pid) 3205 task = get_pid_task(pid, PIDTYPE_PID); 3206 rcu_read_unlock(); 3207 if (!task) 3208 return ret; 3209 ret = yield_to(task, 1); 3210 put_task_struct(task); 3211 3212 return ret; 3213 } 3214 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 3215 3216 /* 3217 * Helper that checks whether a VCPU is eligible for directed yield. 
3218 * Most eligible candidate to yield is decided by following heuristics: 3219 * 3220 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 3221 * (preempted lock holder), indicated by @in_spin_loop. 3222 * Set at the beginning and cleared at the end of interception/PLE handler. 3223 * 3224 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 3225 * chance last time (mostly it has become eligible now since we have probably 3226 * yielded to lockholder in last iteration. This is done by toggling 3227 * @dy_eligible each time a VCPU checked for eligibility.) 3228 * 3229 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 3230 * to preempted lock-holder could result in wrong VCPU selection and CPU 3231 * burning. Giving priority for a potential lock-holder increases lock 3232 * progress. 3233 * 3234 * Since algorithm is based on heuristics, accessing another VCPU data without 3235 * locking does not harm. It may result in trying to yield to same VCPU, fail 3236 * and continue with next VCPU and so on. 3237 */ 3238 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 3239 { 3240 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 3241 bool eligible; 3242 3243 eligible = !vcpu->spin_loop.in_spin_loop || 3244 vcpu->spin_loop.dy_eligible; 3245 3246 if (vcpu->spin_loop.in_spin_loop) 3247 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 3248 3249 return eligible; 3250 #else 3251 return true; 3252 #endif 3253 } 3254 3255 /* 3256 * Unlike kvm_arch_vcpu_runnable, this function is called outside 3257 * a vcpu_load/vcpu_put pair. However, for most architectures 3258 * kvm_arch_vcpu_runnable does not require vcpu_load. 3259 */ 3260 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 3261 { 3262 return kvm_arch_vcpu_runnable(vcpu); 3263 } 3264 3265 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu) 3266 { 3267 if (kvm_arch_dy_runnable(vcpu)) 3268 return true; 3269 3270 #ifdef CONFIG_KVM_ASYNC_PF 3271 if (!list_empty_careful(&vcpu->async_pf.done)) 3272 return true; 3273 #endif 3274 3275 return false; 3276 } 3277 3278 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 3279 { 3280 return false; 3281 } 3282 3283 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) 3284 { 3285 struct kvm *kvm = me->kvm; 3286 struct kvm_vcpu *vcpu; 3287 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 3288 int yielded = 0; 3289 int try = 3; 3290 int pass; 3291 int i; 3292 3293 kvm_vcpu_set_in_spin_loop(me, true); 3294 /* 3295 * We boost the priority of a VCPU that is runnable but not 3296 * currently running, because it got preempted by something 3297 * else and called schedule in __vcpu_run. Hopefully that 3298 * VCPU is holding the lock that we need and will release it. 3299 * We approximate round-robin by starting at the last boosted VCPU. 
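 *
 * The two passes below behave like one circular scan; e.g. with four
 * vCPUs and last_boosted_vcpu == 2, the yield candidates are visited in
 * the order 3, 0, 1, 2.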
3300 */ 3301 for (pass = 0; pass < 2 && !yielded && try; pass++) { 3302 kvm_for_each_vcpu(i, vcpu, kvm) { 3303 if (!pass && i <= last_boosted_vcpu) { 3304 i = last_boosted_vcpu; 3305 continue; 3306 } else if (pass && i > last_boosted_vcpu) 3307 break; 3308 if (!READ_ONCE(vcpu->ready)) 3309 continue; 3310 if (vcpu == me) 3311 continue; 3312 if (rcuwait_active(&vcpu->wait) && 3313 !vcpu_dy_runnable(vcpu)) 3314 continue; 3315 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && 3316 !kvm_arch_dy_has_pending_interrupt(vcpu) && 3317 !kvm_arch_vcpu_in_kernel(vcpu)) 3318 continue; 3319 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 3320 continue; 3321 3322 yielded = kvm_vcpu_yield_to(vcpu); 3323 if (yielded > 0) { 3324 kvm->last_boosted_vcpu = i; 3325 break; 3326 } else if (yielded < 0) { 3327 try--; 3328 if (!try) 3329 break; 3330 } 3331 } 3332 } 3333 kvm_vcpu_set_in_spin_loop(me, false); 3334 3335 /* Ensure vcpu is not eligible during next spinloop */ 3336 kvm_vcpu_set_dy_eligible(me, false); 3337 } 3338 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 3339 3340 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) 3341 { 3342 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0 3343 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && 3344 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + 3345 kvm->dirty_ring_size / PAGE_SIZE); 3346 #else 3347 return false; 3348 #endif 3349 } 3350 3351 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf) 3352 { 3353 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data; 3354 struct page *page; 3355 3356 if (vmf->pgoff == 0) 3357 page = virt_to_page(vcpu->run); 3358 #ifdef CONFIG_X86 3359 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 3360 page = virt_to_page(vcpu->arch.pio_data); 3361 #endif 3362 #ifdef CONFIG_KVM_MMIO 3363 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 3364 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 3365 #endif 3366 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) 3367 page = kvm_dirty_ring_get_page( 3368 &vcpu->dirty_ring, 3369 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET); 3370 else 3371 return kvm_arch_vcpu_fault(vcpu, vmf); 3372 get_page(page); 3373 vmf->page = page; 3374 return 0; 3375 } 3376 3377 static const struct vm_operations_struct kvm_vcpu_vm_ops = { 3378 .fault = kvm_vcpu_fault, 3379 }; 3380 3381 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 3382 { 3383 struct kvm_vcpu *vcpu = file->private_data; 3384 unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 3385 3386 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || 3387 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && 3388 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED))) 3389 return -EINVAL; 3390 3391 vma->vm_ops = &kvm_vcpu_vm_ops; 3392 return 0; 3393 } 3394 3395 static int kvm_vcpu_release(struct inode *inode, struct file *filp) 3396 { 3397 struct kvm_vcpu *vcpu = filp->private_data; 3398 3399 kvm_put_kvm(vcpu->kvm); 3400 return 0; 3401 } 3402 3403 static struct file_operations kvm_vcpu_fops = { 3404 .release = kvm_vcpu_release, 3405 .unlocked_ioctl = kvm_vcpu_ioctl, 3406 .mmap = kvm_vcpu_mmap, 3407 .llseek = noop_llseek, 3408 KVM_COMPAT(kvm_vcpu_compat_ioctl), 3409 }; 3410 3411 /* 3412 * Allocates an inode for the vcpu. 
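 * The returned fd shows up as "anon_inode:kvm-vcpu:<id>" in
 * /proc/<pid>/fd, which is what the name buffer below is sized for.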
3413 */ 3414 static int create_vcpu_fd(struct kvm_vcpu *vcpu) 3415 { 3416 char name[8 + 1 + ITOA_MAX_LEN + 1]; 3417 3418 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id); 3419 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 3420 } 3421 3422 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) 3423 { 3424 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS 3425 struct dentry *debugfs_dentry; 3426 char dir_name[ITOA_MAX_LEN * 2]; 3427 3428 if (!debugfs_initialized()) 3429 return; 3430 3431 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id); 3432 debugfs_dentry = debugfs_create_dir(dir_name, 3433 vcpu->kvm->debugfs_dentry); 3434 3435 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry); 3436 #endif 3437 } 3438 3439 /* 3440 * Creates some virtual cpus. Good luck creating more than one. 3441 */ 3442 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 3443 { 3444 int r; 3445 struct kvm_vcpu *vcpu; 3446 struct page *page; 3447 3448 if (id >= KVM_MAX_VCPU_ID) 3449 return -EINVAL; 3450 3451 mutex_lock(&kvm->lock); 3452 if (kvm->created_vcpus == KVM_MAX_VCPUS) { 3453 mutex_unlock(&kvm->lock); 3454 return -EINVAL; 3455 } 3456 3457 kvm->created_vcpus++; 3458 mutex_unlock(&kvm->lock); 3459 3460 r = kvm_arch_vcpu_precreate(kvm, id); 3461 if (r) 3462 goto vcpu_decrement; 3463 3464 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 3465 if (!vcpu) { 3466 r = -ENOMEM; 3467 goto vcpu_decrement; 3468 } 3469 3470 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE); 3471 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 3472 if (!page) { 3473 r = -ENOMEM; 3474 goto vcpu_free; 3475 } 3476 vcpu->run = page_address(page); 3477 3478 kvm_vcpu_init(vcpu, kvm, id); 3479 3480 r = kvm_arch_vcpu_create(vcpu); 3481 if (r) 3482 goto vcpu_free_run_page; 3483 3484 if (kvm->dirty_ring_size) { 3485 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring, 3486 id, kvm->dirty_ring_size); 3487 if (r) 3488 goto arch_vcpu_destroy; 3489 } 3490 3491 mutex_lock(&kvm->lock); 3492 if (kvm_get_vcpu_by_id(kvm, id)) { 3493 r = -EEXIST; 3494 goto unlock_vcpu_destroy; 3495 } 3496 3497 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); 3498 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); 3499 3500 /* Fill the stats id string for the vcpu */ 3501 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", 3502 task_pid_nr(current), id); 3503 3504 /* Now it's all set up, let userspace reach it */ 3505 kvm_get_kvm(kvm); 3506 r = create_vcpu_fd(vcpu); 3507 if (r < 0) { 3508 kvm_put_kvm_no_destroy(kvm); 3509 goto unlock_vcpu_destroy; 3510 } 3511 3512 kvm->vcpus[vcpu->vcpu_idx] = vcpu; 3513 3514 /* 3515 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 3516 * before kvm->online_vcpu's incremented value. 
3517 */ 3518 smp_wmb(); 3519 atomic_inc(&kvm->online_vcpus); 3520 3521 mutex_unlock(&kvm->lock); 3522 kvm_arch_vcpu_postcreate(vcpu); 3523 kvm_create_vcpu_debugfs(vcpu); 3524 return r; 3525 3526 unlock_vcpu_destroy: 3527 mutex_unlock(&kvm->lock); 3528 kvm_dirty_ring_free(&vcpu->dirty_ring); 3529 arch_vcpu_destroy: 3530 kvm_arch_vcpu_destroy(vcpu); 3531 vcpu_free_run_page: 3532 free_page((unsigned long)vcpu->run); 3533 vcpu_free: 3534 kmem_cache_free(kvm_vcpu_cache, vcpu); 3535 vcpu_decrement: 3536 mutex_lock(&kvm->lock); 3537 kvm->created_vcpus--; 3538 mutex_unlock(&kvm->lock); 3539 return r; 3540 } 3541 3542 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 3543 { 3544 if (sigset) { 3545 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3546 vcpu->sigset_active = 1; 3547 vcpu->sigset = *sigset; 3548 } else 3549 vcpu->sigset_active = 0; 3550 return 0; 3551 } 3552 3553 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, 3554 size_t size, loff_t *offset) 3555 { 3556 struct kvm_vcpu *vcpu = file->private_data; 3557 3558 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header, 3559 &kvm_vcpu_stats_desc[0], &vcpu->stat, 3560 sizeof(vcpu->stat), user_buffer, size, offset); 3561 } 3562 3563 static const struct file_operations kvm_vcpu_stats_fops = { 3564 .read = kvm_vcpu_stats_read, 3565 .llseek = noop_llseek, 3566 }; 3567 3568 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) 3569 { 3570 int fd; 3571 struct file *file; 3572 char name[15 + ITOA_MAX_LEN + 1]; 3573 3574 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id); 3575 3576 fd = get_unused_fd_flags(O_CLOEXEC); 3577 if (fd < 0) 3578 return fd; 3579 3580 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY); 3581 if (IS_ERR(file)) { 3582 put_unused_fd(fd); 3583 return PTR_ERR(file); 3584 } 3585 file->f_mode |= FMODE_PREAD; 3586 fd_install(fd, file); 3587 3588 return fd; 3589 } 3590 3591 static long kvm_vcpu_ioctl(struct file *filp, 3592 unsigned int ioctl, unsigned long arg) 3593 { 3594 struct kvm_vcpu *vcpu = filp->private_data; 3595 void __user *argp = (void __user *)arg; 3596 int r; 3597 struct kvm_fpu *fpu = NULL; 3598 struct kvm_sregs *kvm_sregs = NULL; 3599 3600 if (vcpu->kvm->mm != current->mm) 3601 return -EIO; 3602 3603 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 3604 return -EINVAL; 3605 3606 /* 3607 * Some architectures have vcpu ioctls that are asynchronous to vcpu 3608 * execution; mutex_lock() would break them. 3609 */ 3610 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg); 3611 if (r != -ENOIOCTLCMD) 3612 return r; 3613 3614 if (mutex_lock_killable(&vcpu->mutex)) 3615 return -EINTR; 3616 switch (ioctl) { 3617 case KVM_RUN: { 3618 struct pid *oldpid; 3619 r = -EINVAL; 3620 if (arg) 3621 goto out; 3622 oldpid = rcu_access_pointer(vcpu->pid); 3623 if (unlikely(oldpid != task_pid(current))) { 3624 /* The thread running this VCPU changed. 
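 * vcpu->pid is read under RCU by kvm_vcpu_yield_to() for directed
 * yield, so it has to track whichever task is currently doing KVM_RUN;
 * the old pid may only be dropped after a grace period.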
*/ 3625 struct pid *newpid; 3626 3627 r = kvm_arch_vcpu_run_pid_change(vcpu); 3628 if (r) 3629 break; 3630 3631 newpid = get_task_pid(current, PIDTYPE_PID); 3632 rcu_assign_pointer(vcpu->pid, newpid); 3633 if (oldpid) 3634 synchronize_rcu(); 3635 put_pid(oldpid); 3636 } 3637 r = kvm_arch_vcpu_ioctl_run(vcpu); 3638 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 3639 break; 3640 } 3641 case KVM_GET_REGS: { 3642 struct kvm_regs *kvm_regs; 3643 3644 r = -ENOMEM; 3645 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT); 3646 if (!kvm_regs) 3647 goto out; 3648 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 3649 if (r) 3650 goto out_free1; 3651 r = -EFAULT; 3652 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 3653 goto out_free1; 3654 r = 0; 3655 out_free1: 3656 kfree(kvm_regs); 3657 break; 3658 } 3659 case KVM_SET_REGS: { 3660 struct kvm_regs *kvm_regs; 3661 3662 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 3663 if (IS_ERR(kvm_regs)) { 3664 r = PTR_ERR(kvm_regs); 3665 goto out; 3666 } 3667 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 3668 kfree(kvm_regs); 3669 break; 3670 } 3671 case KVM_GET_SREGS: { 3672 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), 3673 GFP_KERNEL_ACCOUNT); 3674 r = -ENOMEM; 3675 if (!kvm_sregs) 3676 goto out; 3677 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 3678 if (r) 3679 goto out; 3680 r = -EFAULT; 3681 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 3682 goto out; 3683 r = 0; 3684 break; 3685 } 3686 case KVM_SET_SREGS: { 3687 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 3688 if (IS_ERR(kvm_sregs)) { 3689 r = PTR_ERR(kvm_sregs); 3690 kvm_sregs = NULL; 3691 goto out; 3692 } 3693 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 3694 break; 3695 } 3696 case KVM_GET_MP_STATE: { 3697 struct kvm_mp_state mp_state; 3698 3699 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 3700 if (r) 3701 goto out; 3702 r = -EFAULT; 3703 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 3704 goto out; 3705 r = 0; 3706 break; 3707 } 3708 case KVM_SET_MP_STATE: { 3709 struct kvm_mp_state mp_state; 3710 3711 r = -EFAULT; 3712 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 3713 goto out; 3714 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 3715 break; 3716 } 3717 case KVM_TRANSLATE: { 3718 struct kvm_translation tr; 3719 3720 r = -EFAULT; 3721 if (copy_from_user(&tr, argp, sizeof(tr))) 3722 goto out; 3723 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 3724 if (r) 3725 goto out; 3726 r = -EFAULT; 3727 if (copy_to_user(argp, &tr, sizeof(tr))) 3728 goto out; 3729 r = 0; 3730 break; 3731 } 3732 case KVM_SET_GUEST_DEBUG: { 3733 struct kvm_guest_debug dbg; 3734 3735 r = -EFAULT; 3736 if (copy_from_user(&dbg, argp, sizeof(dbg))) 3737 goto out; 3738 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 3739 break; 3740 } 3741 case KVM_SET_SIGNAL_MASK: { 3742 struct kvm_signal_mask __user *sigmask_arg = argp; 3743 struct kvm_signal_mask kvm_sigmask; 3744 sigset_t sigset, *p; 3745 3746 p = NULL; 3747 if (argp) { 3748 r = -EFAULT; 3749 if (copy_from_user(&kvm_sigmask, argp, 3750 sizeof(kvm_sigmask))) 3751 goto out; 3752 r = -EINVAL; 3753 if (kvm_sigmask.len != sizeof(sigset)) 3754 goto out; 3755 r = -EFAULT; 3756 if (copy_from_user(&sigset, sigmask_arg->sigset, 3757 sizeof(sigset))) 3758 goto out; 3759 p = &sigset; 3760 } 3761 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 3762 break; 3763 } 3764 case KVM_GET_FPU: { 3765 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT); 3766 r = -ENOMEM; 3767 if (!fpu) 3768 goto out; 
3769 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 3770 if (r) 3771 goto out; 3772 r = -EFAULT; 3773 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 3774 goto out; 3775 r = 0; 3776 break; 3777 } 3778 case KVM_SET_FPU: { 3779 fpu = memdup_user(argp, sizeof(*fpu)); 3780 if (IS_ERR(fpu)) { 3781 r = PTR_ERR(fpu); 3782 fpu = NULL; 3783 goto out; 3784 } 3785 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 3786 break; 3787 } 3788 case KVM_GET_STATS_FD: { 3789 r = kvm_vcpu_ioctl_get_stats_fd(vcpu); 3790 break; 3791 } 3792 default: 3793 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 3794 } 3795 out: 3796 mutex_unlock(&vcpu->mutex); 3797 kfree(fpu); 3798 kfree(kvm_sregs); 3799 return r; 3800 } 3801 3802 #ifdef CONFIG_KVM_COMPAT 3803 static long kvm_vcpu_compat_ioctl(struct file *filp, 3804 unsigned int ioctl, unsigned long arg) 3805 { 3806 struct kvm_vcpu *vcpu = filp->private_data; 3807 void __user *argp = compat_ptr(arg); 3808 int r; 3809 3810 if (vcpu->kvm->mm != current->mm) 3811 return -EIO; 3812 3813 switch (ioctl) { 3814 case KVM_SET_SIGNAL_MASK: { 3815 struct kvm_signal_mask __user *sigmask_arg = argp; 3816 struct kvm_signal_mask kvm_sigmask; 3817 sigset_t sigset; 3818 3819 if (argp) { 3820 r = -EFAULT; 3821 if (copy_from_user(&kvm_sigmask, argp, 3822 sizeof(kvm_sigmask))) 3823 goto out; 3824 r = -EINVAL; 3825 if (kvm_sigmask.len != sizeof(compat_sigset_t)) 3826 goto out; 3827 r = -EFAULT; 3828 if (get_compat_sigset(&sigset, 3829 (compat_sigset_t __user *)sigmask_arg->sigset)) 3830 goto out; 3831 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 3832 } else 3833 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 3834 break; 3835 } 3836 default: 3837 r = kvm_vcpu_ioctl(filp, ioctl, arg); 3838 } 3839 3840 out: 3841 return r; 3842 } 3843 #endif 3844 3845 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma) 3846 { 3847 struct kvm_device *dev = filp->private_data; 3848 3849 if (dev->ops->mmap) 3850 return dev->ops->mmap(dev, vma); 3851 3852 return -ENODEV; 3853 } 3854 3855 static int kvm_device_ioctl_attr(struct kvm_device *dev, 3856 int (*accessor)(struct kvm_device *dev, 3857 struct kvm_device_attr *attr), 3858 unsigned long arg) 3859 { 3860 struct kvm_device_attr attr; 3861 3862 if (!accessor) 3863 return -EPERM; 3864 3865 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 3866 return -EFAULT; 3867 3868 return accessor(dev, &attr); 3869 } 3870 3871 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 3872 unsigned long arg) 3873 { 3874 struct kvm_device *dev = filp->private_data; 3875 3876 if (dev->kvm->mm != current->mm) 3877 return -EIO; 3878 3879 switch (ioctl) { 3880 case KVM_SET_DEVICE_ATTR: 3881 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 3882 case KVM_GET_DEVICE_ATTR: 3883 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 3884 case KVM_HAS_DEVICE_ATTR: 3885 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 3886 default: 3887 if (dev->ops->ioctl) 3888 return dev->ops->ioctl(dev, ioctl, arg); 3889 3890 return -ENOTTY; 3891 } 3892 } 3893 3894 static int kvm_device_release(struct inode *inode, struct file *filp) 3895 { 3896 struct kvm_device *dev = filp->private_data; 3897 struct kvm *kvm = dev->kvm; 3898 3899 if (dev->ops->release) { 3900 mutex_lock(&kvm->lock); 3901 list_del(&dev->vm_node); 3902 dev->ops->release(dev); 3903 mutex_unlock(&kvm->lock); 3904 } 3905 3906 kvm_put_kvm(kvm); 3907 return 0; 3908 } 3909 3910 static const struct file_operations kvm_device_fops = { 3911 .unlocked_ioctl = kvm_device_ioctl, 3912 
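	/*
	 * KVM_COMPAT() below points compat_ioctl at this same handler;
	 * struct kvm_device_attr only carries fixed-size __u32/__u64
	 * fields, so no translation is needed.
	 */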
.release = kvm_device_release, 3913 KVM_COMPAT(kvm_device_ioctl), 3914 .mmap = kvm_device_mmap, 3915 }; 3916 3917 struct kvm_device *kvm_device_from_filp(struct file *filp) 3918 { 3919 if (filp->f_op != &kvm_device_fops) 3920 return NULL; 3921 3922 return filp->private_data; 3923 } 3924 3925 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 3926 #ifdef CONFIG_KVM_MPIC 3927 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 3928 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 3929 #endif 3930 }; 3931 3932 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type) 3933 { 3934 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 3935 return -ENOSPC; 3936 3937 if (kvm_device_ops_table[type] != NULL) 3938 return -EEXIST; 3939 3940 kvm_device_ops_table[type] = ops; 3941 return 0; 3942 } 3943 3944 void kvm_unregister_device_ops(u32 type) 3945 { 3946 if (kvm_device_ops_table[type] != NULL) 3947 kvm_device_ops_table[type] = NULL; 3948 } 3949 3950 static int kvm_ioctl_create_device(struct kvm *kvm, 3951 struct kvm_create_device *cd) 3952 { 3953 const struct kvm_device_ops *ops = NULL; 3954 struct kvm_device *dev; 3955 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 3956 int type; 3957 int ret; 3958 3959 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 3960 return -ENODEV; 3961 3962 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); 3963 ops = kvm_device_ops_table[type]; 3964 if (ops == NULL) 3965 return -ENODEV; 3966 3967 if (test) 3968 return 0; 3969 3970 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT); 3971 if (!dev) 3972 return -ENOMEM; 3973 3974 dev->ops = ops; 3975 dev->kvm = kvm; 3976 3977 mutex_lock(&kvm->lock); 3978 ret = ops->create(dev, type); 3979 if (ret < 0) { 3980 mutex_unlock(&kvm->lock); 3981 kfree(dev); 3982 return ret; 3983 } 3984 list_add(&dev->vm_node, &kvm->devices); 3985 mutex_unlock(&kvm->lock); 3986 3987 if (ops->init) 3988 ops->init(dev); 3989 3990 kvm_get_kvm(kvm); 3991 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3992 if (ret < 0) { 3993 kvm_put_kvm_no_destroy(kvm); 3994 mutex_lock(&kvm->lock); 3995 list_del(&dev->vm_node); 3996 mutex_unlock(&kvm->lock); 3997 ops->destroy(dev); 3998 return ret; 3999 } 4000 4001 cd->fd = ret; 4002 return 0; 4003 } 4004 4005 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 4006 { 4007 switch (arg) { 4008 case KVM_CAP_USER_MEMORY: 4009 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 4010 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 4011 case KVM_CAP_INTERNAL_ERROR_DATA: 4012 #ifdef CONFIG_HAVE_KVM_MSI 4013 case KVM_CAP_SIGNAL_MSI: 4014 #endif 4015 #ifdef CONFIG_HAVE_KVM_IRQFD 4016 case KVM_CAP_IRQFD: 4017 case KVM_CAP_IRQFD_RESAMPLE: 4018 #endif 4019 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 4020 case KVM_CAP_CHECK_EXTENSION_VM: 4021 case KVM_CAP_ENABLE_CAP_VM: 4022 case KVM_CAP_HALT_POLL: 4023 return 1; 4024 #ifdef CONFIG_KVM_MMIO 4025 case KVM_CAP_COALESCED_MMIO: 4026 return KVM_COALESCED_MMIO_PAGE_OFFSET; 4027 case KVM_CAP_COALESCED_PIO: 4028 return 1; 4029 #endif 4030 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4031 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: 4032 return KVM_DIRTY_LOG_MANUAL_CAPS; 4033 #endif 4034 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4035 case KVM_CAP_IRQ_ROUTING: 4036 return KVM_MAX_IRQ_ROUTES; 4037 #endif 4038 #if KVM_ADDRESS_SPACE_NUM > 1 4039 case KVM_CAP_MULTI_ADDRESS_SPACE: 4040 return KVM_ADDRESS_SPACE_NUM; 4041 #endif 4042 case KVM_CAP_NR_MEMSLOTS: 4043 return KVM_USER_MEM_SLOTS; 4044 case KVM_CAP_DIRTY_LOG_RING: 4045 #if 
KVM_DIRTY_LOG_PAGE_OFFSET > 0 4046 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); 4047 #else 4048 return 0; 4049 #endif 4050 case KVM_CAP_BINARY_STATS_FD: 4051 return 1; 4052 default: 4053 break; 4054 } 4055 return kvm_vm_ioctl_check_extension(kvm, arg); 4056 } 4057 4058 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) 4059 { 4060 int r; 4061 4062 if (!KVM_DIRTY_LOG_PAGE_OFFSET) 4063 return -EINVAL; 4064 4065 /* The size must be a power of 2. */ 4066 if (!size || (size & (size - 1))) 4067 return -EINVAL; 4068 4069 /* The ring must hold at least the reserved entries and span a full page. */ 4070 if (size < kvm_dirty_ring_get_rsvd_entries() * 4071 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE) 4072 return -EINVAL; 4073 4074 if (size > KVM_DIRTY_RING_MAX_ENTRIES * 4075 sizeof(struct kvm_dirty_gfn)) 4076 return -E2BIG; 4077 4078 /* The ring size may only be set once. */ 4079 if (kvm->dirty_ring_size) 4080 return -EINVAL; 4081 4082 mutex_lock(&kvm->lock); 4083 4084 if (kvm->created_vcpus) { 4085 /* The size cannot be changed once vCPUs have been created. */ 4086 r = -EINVAL; 4087 } else { 4088 kvm->dirty_ring_size = size; 4089 r = 0; 4090 } 4091 4092 mutex_unlock(&kvm->lock); 4093 return r; 4094 } 4095 4096 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) 4097 { 4098 int i; 4099 struct kvm_vcpu *vcpu; 4100 int cleared = 0; 4101 4102 if (!kvm->dirty_ring_size) 4103 return -EINVAL; 4104 4105 mutex_lock(&kvm->slots_lock); 4106 4107 kvm_for_each_vcpu(i, vcpu, kvm) 4108 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); 4109 4110 mutex_unlock(&kvm->slots_lock); 4111 4112 if (cleared) 4113 kvm_flush_remote_tlbs(kvm); 4114 4115 return cleared; 4116 } 4117 4118 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, 4119 struct kvm_enable_cap *cap) 4120 { 4121 return -EINVAL; 4122 } 4123 4124 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, 4125 struct kvm_enable_cap *cap) 4126 { 4127 switch (cap->cap) { 4128 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4129 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: { 4130 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE; 4131 4132 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE) 4133 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS; 4134 4135 if (cap->flags || (cap->args[0] & ~allowed_options)) 4136 return -EINVAL; 4137 kvm->manual_dirty_log_protect = cap->args[0]; 4138 return 0; 4139 } 4140 #endif 4141 case KVM_CAP_HALT_POLL: { 4142 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0]) 4143 return -EINVAL; 4144 4145 kvm->max_halt_poll_ns = cap->args[0]; 4146 return 0; 4147 } 4148 case KVM_CAP_DIRTY_LOG_RING: 4149 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4150 default: 4151 return kvm_vm_ioctl_enable_cap(kvm, cap); 4152 } 4153 } 4154 4155 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, 4156 size_t size, loff_t *offset) 4157 { 4158 struct kvm *kvm = file->private_data; 4159 4160 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, 4161 &kvm_vm_stats_desc[0], &kvm->stat, 4162 sizeof(kvm->stat), user_buffer, size, offset); 4163 } 4164 4165 static const struct file_operations kvm_vm_stats_fops = { 4166 .read = kvm_vm_stats_read, 4167 .llseek = noop_llseek, 4168 }; 4169 4170 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) 4171 { 4172 int fd; 4173 struct file *file; 4174 4175 fd = get_unused_fd_flags(O_CLOEXEC); 4176 if (fd < 0) 4177 return fd; 4178 4179 file = anon_inode_getfile("kvm-vm-stats", 4180 &kvm_vm_stats_fops, kvm,
O_RDONLY); 4181 if (IS_ERR(file)) { 4182 put_unused_fd(fd); 4183 return PTR_ERR(file); 4184 } 4185 file->f_mode |= FMODE_PREAD; 4186 fd_install(fd, file); 4187 4188 return fd; 4189 } 4190 4191 static long kvm_vm_ioctl(struct file *filp, 4192 unsigned int ioctl, unsigned long arg) 4193 { 4194 struct kvm *kvm = filp->private_data; 4195 void __user *argp = (void __user *)arg; 4196 int r; 4197 4198 if (kvm->mm != current->mm) 4199 return -EIO; 4200 switch (ioctl) { 4201 case KVM_CREATE_VCPU: 4202 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 4203 break; 4204 case KVM_ENABLE_CAP: { 4205 struct kvm_enable_cap cap; 4206 4207 r = -EFAULT; 4208 if (copy_from_user(&cap, argp, sizeof(cap))) 4209 goto out; 4210 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); 4211 break; 4212 } 4213 case KVM_SET_USER_MEMORY_REGION: { 4214 struct kvm_userspace_memory_region kvm_userspace_mem; 4215 4216 r = -EFAULT; 4217 if (copy_from_user(&kvm_userspace_mem, argp, 4218 sizeof(kvm_userspace_mem))) 4219 goto out; 4220 4221 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 4222 break; 4223 } 4224 case KVM_GET_DIRTY_LOG: { 4225 struct kvm_dirty_log log; 4226 4227 r = -EFAULT; 4228 if (copy_from_user(&log, argp, sizeof(log))) 4229 goto out; 4230 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4231 break; 4232 } 4233 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 4234 case KVM_CLEAR_DIRTY_LOG: { 4235 struct kvm_clear_dirty_log log; 4236 4237 r = -EFAULT; 4238 if (copy_from_user(&log, argp, sizeof(log))) 4239 goto out; 4240 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); 4241 break; 4242 } 4243 #endif 4244 #ifdef CONFIG_KVM_MMIO 4245 case KVM_REGISTER_COALESCED_MMIO: { 4246 struct kvm_coalesced_mmio_zone zone; 4247 4248 r = -EFAULT; 4249 if (copy_from_user(&zone, argp, sizeof(zone))) 4250 goto out; 4251 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 4252 break; 4253 } 4254 case KVM_UNREGISTER_COALESCED_MMIO: { 4255 struct kvm_coalesced_mmio_zone zone; 4256 4257 r = -EFAULT; 4258 if (copy_from_user(&zone, argp, sizeof(zone))) 4259 goto out; 4260 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 4261 break; 4262 } 4263 #endif 4264 case KVM_IRQFD: { 4265 struct kvm_irqfd data; 4266 4267 r = -EFAULT; 4268 if (copy_from_user(&data, argp, sizeof(data))) 4269 goto out; 4270 r = kvm_irqfd(kvm, &data); 4271 break; 4272 } 4273 case KVM_IOEVENTFD: { 4274 struct kvm_ioeventfd data; 4275 4276 r = -EFAULT; 4277 if (copy_from_user(&data, argp, sizeof(data))) 4278 goto out; 4279 r = kvm_ioeventfd(kvm, &data); 4280 break; 4281 } 4282 #ifdef CONFIG_HAVE_KVM_MSI 4283 case KVM_SIGNAL_MSI: { 4284 struct kvm_msi msi; 4285 4286 r = -EFAULT; 4287 if (copy_from_user(&msi, argp, sizeof(msi))) 4288 goto out; 4289 r = kvm_send_userspace_msi(kvm, &msi); 4290 break; 4291 } 4292 #endif 4293 #ifdef __KVM_HAVE_IRQ_LINE 4294 case KVM_IRQ_LINE_STATUS: 4295 case KVM_IRQ_LINE: { 4296 struct kvm_irq_level irq_event; 4297 4298 r = -EFAULT; 4299 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 4300 goto out; 4301 4302 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 4303 ioctl == KVM_IRQ_LINE_STATUS); 4304 if (r) 4305 goto out; 4306 4307 r = -EFAULT; 4308 if (ioctl == KVM_IRQ_LINE_STATUS) { 4309 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 4310 goto out; 4311 } 4312 4313 r = 0; 4314 break; 4315 } 4316 #endif 4317 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 4318 case KVM_SET_GSI_ROUTING: { 4319 struct kvm_irq_routing routing; 4320 struct kvm_irq_routing __user *urouting; 4321 struct kvm_irq_routing_entry *entries = NULL; 4322 4323 r = -EFAULT; 4324 if 
(copy_from_user(&routing, argp, sizeof(routing))) 4325 goto out; 4326 r = -EINVAL; 4327 if (!kvm_arch_can_set_irq_routing(kvm)) 4328 goto out; 4329 if (routing.nr > KVM_MAX_IRQ_ROUTES) 4330 goto out; 4331 if (routing.flags) 4332 goto out; 4333 if (routing.nr) { 4334 urouting = argp; 4335 entries = vmemdup_user(urouting->entries, 4336 array_size(sizeof(*entries), 4337 routing.nr)); 4338 if (IS_ERR(entries)) { 4339 r = PTR_ERR(entries); 4340 goto out; 4341 } 4342 } 4343 r = kvm_set_irq_routing(kvm, entries, routing.nr, 4344 routing.flags); 4345 kvfree(entries); 4346 break; 4347 } 4348 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 4349 case KVM_CREATE_DEVICE: { 4350 struct kvm_create_device cd; 4351 4352 r = -EFAULT; 4353 if (copy_from_user(&cd, argp, sizeof(cd))) 4354 goto out; 4355 4356 r = kvm_ioctl_create_device(kvm, &cd); 4357 if (r) 4358 goto out; 4359 4360 r = -EFAULT; 4361 if (copy_to_user(argp, &cd, sizeof(cd))) 4362 goto out; 4363 4364 r = 0; 4365 break; 4366 } 4367 case KVM_CHECK_EXTENSION: 4368 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 4369 break; 4370 case KVM_RESET_DIRTY_RINGS: 4371 r = kvm_vm_ioctl_reset_dirty_pages(kvm); 4372 break; 4373 case KVM_GET_STATS_FD: 4374 r = kvm_vm_ioctl_get_stats_fd(kvm); 4375 break; 4376 default: 4377 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 4378 } 4379 out: 4380 return r; 4381 } 4382 4383 #ifdef CONFIG_KVM_COMPAT 4384 struct compat_kvm_dirty_log { 4385 __u32 slot; 4386 __u32 padding1; 4387 union { 4388 compat_uptr_t dirty_bitmap; /* one bit per page */ 4389 __u64 padding2; 4390 }; 4391 }; 4392 4393 static long kvm_vm_compat_ioctl(struct file *filp, 4394 unsigned int ioctl, unsigned long arg) 4395 { 4396 struct kvm *kvm = filp->private_data; 4397 int r; 4398 4399 if (kvm->mm != current->mm) 4400 return -EIO; 4401 switch (ioctl) { 4402 case KVM_GET_DIRTY_LOG: { 4403 struct compat_kvm_dirty_log compat_log; 4404 struct kvm_dirty_log log; 4405 4406 if (copy_from_user(&compat_log, (void __user *)arg, 4407 sizeof(compat_log))) 4408 return -EFAULT; 4409 log.slot = compat_log.slot; 4410 log.padding1 = compat_log.padding1; 4411 log.padding2 = compat_log.padding2; 4412 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 4413 4414 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 4415 break; 4416 } 4417 default: 4418 r = kvm_vm_ioctl(filp, ioctl, arg); 4419 } 4420 return r; 4421 } 4422 #endif 4423 4424 static struct file_operations kvm_vm_fops = { 4425 .release = kvm_vm_release, 4426 .unlocked_ioctl = kvm_vm_ioctl, 4427 .llseek = noop_llseek, 4428 KVM_COMPAT(kvm_vm_compat_ioctl), 4429 }; 4430 4431 bool file_is_kvm(struct file *file) 4432 { 4433 return file && file->f_op == &kvm_vm_fops; 4434 } 4435 EXPORT_SYMBOL_GPL(file_is_kvm); 4436 4437 static int kvm_dev_ioctl_create_vm(unsigned long type) 4438 { 4439 int r; 4440 struct kvm *kvm; 4441 struct file *file; 4442 4443 kvm = kvm_create_vm(type); 4444 if (IS_ERR(kvm)) 4445 return PTR_ERR(kvm); 4446 #ifdef CONFIG_KVM_MMIO 4447 r = kvm_coalesced_mmio_init(kvm); 4448 if (r < 0) 4449 goto put_kvm; 4450 #endif 4451 r = get_unused_fd_flags(O_CLOEXEC); 4452 if (r < 0) 4453 goto put_kvm; 4454 4455 snprintf(kvm->stats_id, sizeof(kvm->stats_id), 4456 "kvm-%d", task_pid_nr(current)); 4457 4458 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); 4459 if (IS_ERR(file)) { 4460 put_unused_fd(r); 4461 r = PTR_ERR(file); 4462 goto put_kvm; 4463 } 4464 4465 /* 4466 * Don't call kvm_put_kvm anymore at this point; file->f_op is 4467 * already set, with ->release() being kvm_vm_release(). 
In error 4468 * cases it will be called by the final fput(file) and will take 4469 * care of doing kvm_put_kvm(kvm). 4470 */ 4471 if (kvm_create_vm_debugfs(kvm, r) < 0) { 4472 put_unused_fd(r); 4473 fput(file); 4474 return -ENOMEM; 4475 } 4476 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); 4477 4478 fd_install(r, file); 4479 return r; 4480 4481 put_kvm: 4482 kvm_put_kvm(kvm); 4483 return r; 4484 } 4485 4486 static long kvm_dev_ioctl(struct file *filp, 4487 unsigned int ioctl, unsigned long arg) 4488 { 4489 long r = -EINVAL; 4490 4491 switch (ioctl) { 4492 case KVM_GET_API_VERSION: 4493 if (arg) 4494 goto out; 4495 r = KVM_API_VERSION; 4496 break; 4497 case KVM_CREATE_VM: 4498 r = kvm_dev_ioctl_create_vm(arg); 4499 break; 4500 case KVM_CHECK_EXTENSION: 4501 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 4502 break; 4503 case KVM_GET_VCPU_MMAP_SIZE: 4504 if (arg) 4505 goto out; 4506 r = PAGE_SIZE; /* struct kvm_run */ 4507 #ifdef CONFIG_X86 4508 r += PAGE_SIZE; /* pio data page */ 4509 #endif 4510 #ifdef CONFIG_KVM_MMIO 4511 r += PAGE_SIZE; /* coalesced mmio ring page */ 4512 #endif 4513 break; 4514 case KVM_TRACE_ENABLE: 4515 case KVM_TRACE_PAUSE: 4516 case KVM_TRACE_DISABLE: 4517 r = -EOPNOTSUPP; 4518 break; 4519 default: 4520 return kvm_arch_dev_ioctl(filp, ioctl, arg); 4521 } 4522 out: 4523 return r; 4524 } 4525 4526 static struct file_operations kvm_chardev_ops = { 4527 .unlocked_ioctl = kvm_dev_ioctl, 4528 .llseek = noop_llseek, 4529 KVM_COMPAT(kvm_dev_ioctl), 4530 }; 4531 4532 static struct miscdevice kvm_dev = { 4533 KVM_MINOR, 4534 "kvm", 4535 &kvm_chardev_ops, 4536 }; 4537 4538 static void hardware_enable_nolock(void *junk) 4539 { 4540 int cpu = raw_smp_processor_id(); 4541 int r; 4542 4543 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4544 return; 4545 4546 cpumask_set_cpu(cpu, cpus_hardware_enabled); 4547 4548 r = kvm_arch_hardware_enable(); 4549 4550 if (r) { 4551 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4552 atomic_inc(&hardware_enable_failed); 4553 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 4554 } 4555 } 4556 4557 static int kvm_starting_cpu(unsigned int cpu) 4558 { 4559 raw_spin_lock(&kvm_count_lock); 4560 if (kvm_usage_count) 4561 hardware_enable_nolock(NULL); 4562 raw_spin_unlock(&kvm_count_lock); 4563 return 0; 4564 } 4565 4566 static void hardware_disable_nolock(void *junk) 4567 { 4568 int cpu = raw_smp_processor_id(); 4569 4570 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 4571 return; 4572 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 4573 kvm_arch_hardware_disable(); 4574 } 4575 4576 static int kvm_dying_cpu(unsigned int cpu) 4577 { 4578 raw_spin_lock(&kvm_count_lock); 4579 if (kvm_usage_count) 4580 hardware_disable_nolock(NULL); 4581 raw_spin_unlock(&kvm_count_lock); 4582 return 0; 4583 } 4584 4585 static void hardware_disable_all_nolock(void) 4586 { 4587 BUG_ON(!kvm_usage_count); 4588 4589 kvm_usage_count--; 4590 if (!kvm_usage_count) 4591 on_each_cpu(hardware_disable_nolock, NULL, 1); 4592 } 4593 4594 static void hardware_disable_all(void) 4595 { 4596 raw_spin_lock(&kvm_count_lock); 4597 hardware_disable_all_nolock(); 4598 raw_spin_unlock(&kvm_count_lock); 4599 } 4600 4601 static int hardware_enable_all(void) 4602 { 4603 int r = 0; 4604 4605 raw_spin_lock(&kvm_count_lock); 4606 4607 kvm_usage_count++; 4608 if (kvm_usage_count == 1) { 4609 atomic_set(&hardware_enable_failed, 0); 4610 on_each_cpu(hardware_enable_nolock, NULL, 1); 4611 4612 if (atomic_read(&hardware_enable_failed)) { 4613 hardware_disable_all_nolock(); 
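/* hardware_enable_nolock() failed on at least one CPU; the usage count was rolled back above, so just report -EBUSY to the caller. */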
4614 r = -EBUSY; 4615 } 4616 } 4617 4618 raw_spin_unlock(&kvm_count_lock); 4619 4620 return r; 4621 } 4622 4623 static int kvm_reboot(struct notifier_block *notifier, unsigned long val, 4624 void *v) 4625 { 4626 /* 4627 * Some (well, at least mine) BIOSes hang on reboot if 4628 * in vmx root mode. 4629 * 4630 * And Intel TXT requires VMX to be off on all CPUs when the system shuts down. 4631 */ 4632 pr_info("kvm: exiting hardware virtualization\n"); 4633 kvm_rebooting = true; 4634 on_each_cpu(hardware_disable_nolock, NULL, 1); 4635 return NOTIFY_OK; 4636 } 4637 4638 static struct notifier_block kvm_reboot_notifier = { 4639 .notifier_call = kvm_reboot, 4640 .priority = 0, 4641 }; 4642 4643 static void kvm_io_bus_destroy(struct kvm_io_bus *bus) 4644 { 4645 int i; 4646 4647 for (i = 0; i < bus->dev_count; i++) { 4648 struct kvm_io_device *pos = bus->range[i].dev; 4649 4650 kvm_iodevice_destructor(pos); 4651 } 4652 kfree(bus); 4653 } 4654 4655 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1, 4656 const struct kvm_io_range *r2) 4657 { 4658 gpa_t addr1 = r1->addr; 4659 gpa_t addr2 = r2->addr; 4660 4661 if (addr1 < addr2) 4662 return -1; 4663 4664 /* If r2->len == 0, match the exact address. If r2->len != 0, 4665 * accept any overlapping write. Any order is acceptable for 4666 * overlapping ranges, because kvm_io_bus_get_first_dev ensures 4667 * we process all of them. 4668 */ 4669 if (r2->len) { 4670 addr1 += r1->len; 4671 addr2 += r2->len; 4672 } 4673 4674 if (addr1 > addr2) 4675 return 1; 4676 4677 return 0; 4678 } 4679 4680 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 4681 { 4682 return kvm_io_bus_cmp(p1, p2); 4683 } 4684 4685 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 4686 gpa_t addr, int len) 4687 { 4688 struct kvm_io_range *range, key; 4689 int off; 4690 4691 key = (struct kvm_io_range) { 4692 .addr = addr, 4693 .len = len, 4694 }; 4695 4696 range = bsearch(&key, bus->range, bus->dev_count, 4697 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 4698 if (range == NULL) 4699 return -ENOENT; 4700 4701 off = range - bus->range; 4702 4703 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 4704 off--; 4705 4706 return off; 4707 } 4708 4709 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4710 struct kvm_io_range *range, const void *val) 4711 { 4712 int idx; 4713 4714 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4715 if (idx < 0) 4716 return -EOPNOTSUPP; 4717 4718 while (idx < bus->dev_count && 4719 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4720 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 4721 range->len, val)) 4722 return idx; 4723 idx++; 4724 } 4725 4726 return -EOPNOTSUPP; 4727 } 4728 4729 /* kvm_io_bus_write - called under kvm->slots_lock */ 4730 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4731 int len, const void *val) 4732 { 4733 struct kvm_io_bus *bus; 4734 struct kvm_io_range range; 4735 int r; 4736 4737 range = (struct kvm_io_range) { 4738 .addr = addr, 4739 .len = len, 4740 }; 4741 4742 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4743 if (!bus) 4744 return -ENOMEM; 4745 r = __kvm_io_bus_write(vcpu, bus, &range, val); 4746 return r < 0 ?
r : 0; 4747 } 4748 EXPORT_SYMBOL_GPL(kvm_io_bus_write); 4749 4750 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 4751 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 4752 gpa_t addr, int len, const void *val, long cookie) 4753 { 4754 struct kvm_io_bus *bus; 4755 struct kvm_io_range range; 4756 4757 range = (struct kvm_io_range) { 4758 .addr = addr, 4759 .len = len, 4760 }; 4761 4762 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4763 if (!bus) 4764 return -ENOMEM; 4765 4766 /* First try the device referenced by cookie. */ 4767 if ((cookie >= 0) && (cookie < bus->dev_count) && 4768 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 4769 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 4770 val)) 4771 return cookie; 4772 4773 /* 4774 * cookie contained garbage; fall back to search and return the 4775 * correct cookie value. 4776 */ 4777 return __kvm_io_bus_write(vcpu, bus, &range, val); 4778 } 4779 4780 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 4781 struct kvm_io_range *range, void *val) 4782 { 4783 int idx; 4784 4785 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 4786 if (idx < 0) 4787 return -EOPNOTSUPP; 4788 4789 while (idx < bus->dev_count && 4790 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 4791 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 4792 range->len, val)) 4793 return idx; 4794 idx++; 4795 } 4796 4797 return -EOPNOTSUPP; 4798 } 4799 4800 /* kvm_io_bus_read - called under kvm->slots_lock */ 4801 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 4802 int len, void *val) 4803 { 4804 struct kvm_io_bus *bus; 4805 struct kvm_io_range range; 4806 int r; 4807 4808 range = (struct kvm_io_range) { 4809 .addr = addr, 4810 .len = len, 4811 }; 4812 4813 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 4814 if (!bus) 4815 return -ENOMEM; 4816 r = __kvm_io_bus_read(vcpu, bus, &range, val); 4817 return r < 0 ? r : 0; 4818 } 4819 4820 /* Caller must hold slots_lock. 
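* The bus is never modified in place: a copy with the new device inserted
* in sort order (see kvm_io_bus_cmp()) replaces the old array, and the SRCU
* grace period guarantees readers only ever see a consistent bus.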
*/ 4821 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 4822 int len, struct kvm_io_device *dev) 4823 { 4824 int i; 4825 struct kvm_io_bus *new_bus, *bus; 4826 struct kvm_io_range range; 4827 4828 bus = kvm_get_bus(kvm, bus_idx); 4829 if (!bus) 4830 return -ENOMEM; 4831 4832 /* exclude ioeventfd which is limited by maximum fd */ 4833 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 4834 return -ENOSPC; 4835 4836 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1), 4837 GFP_KERNEL_ACCOUNT); 4838 if (!new_bus) 4839 return -ENOMEM; 4840 4841 range = (struct kvm_io_range) { 4842 .addr = addr, 4843 .len = len, 4844 .dev = dev, 4845 }; 4846 4847 for (i = 0; i < bus->dev_count; i++) 4848 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0) 4849 break; 4850 4851 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 4852 new_bus->dev_count++; 4853 new_bus->range[i] = range; 4854 memcpy(new_bus->range + i + 1, bus->range + i, 4855 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 4856 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 4857 synchronize_srcu_expedited(&kvm->srcu); 4858 kfree(bus); 4859 4860 return 0; 4861 } 4862 4863 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 4864 struct kvm_io_device *dev) 4865 { 4866 int i, j; 4867 struct kvm_io_bus *new_bus, *bus; 4868 4869 lockdep_assert_held(&kvm->slots_lock); 4870 4871 bus = kvm_get_bus(kvm, bus_idx); 4872 if (!bus) 4873 return 0; 4874 4875 for (i = 0; i < bus->dev_count; i++) { 4876 if (bus->range[i].dev == dev) { 4877 break; 4878 } 4879 } 4880 4881 if (i == bus->dev_count) 4882 return 0; 4883 4884 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), 4885 GFP_KERNEL_ACCOUNT); 4886 if (new_bus) { 4887 memcpy(new_bus, bus, struct_size(bus, range, i)); 4888 new_bus->dev_count--; 4889 memcpy(new_bus->range + i, bus->range + i + 1, 4890 flex_array_size(new_bus, range, new_bus->dev_count - i)); 4891 } 4892 4893 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 4894 synchronize_srcu_expedited(&kvm->srcu); 4895 4896 /* Destroy the old bus _after_ installing the (null) bus. */ 4897 if (!new_bus) { 4898 pr_err("kvm: failed to shrink bus, removing it completely\n"); 4899 for (j = 0; j < bus->dev_count; j++) { 4900 if (j == i) 4901 continue; 4902 kvm_iodevice_destructor(bus->range[j].dev); 4903 } 4904 } 4905 4906 kfree(bus); 4907 return new_bus ? 0 : -ENOMEM; 4908 } 4909 4910 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 4911 gpa_t addr) 4912 { 4913 struct kvm_io_bus *bus; 4914 int dev_idx, srcu_idx; 4915 struct kvm_io_device *iodev = NULL; 4916 4917 srcu_idx = srcu_read_lock(&kvm->srcu); 4918 4919 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 4920 if (!bus) 4921 goto out_unlock; 4922 4923 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 4924 if (dev_idx < 0) 4925 goto out_unlock; 4926 4927 iodev = bus->range[dev_idx].dev; 4928 4929 out_unlock: 4930 srcu_read_unlock(&kvm->srcu, srcu_idx); 4931 4932 return iodev; 4933 } 4934 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev); 4935 4936 static int kvm_debugfs_open(struct inode *inode, struct file *file, 4937 int (*get)(void *, u64 *), int (*set)(void *, u64), 4938 const char *fmt) 4939 { 4940 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 4941 inode->i_private; 4942 4943 /* The debugfs files are a reference to the kvm struct which 4944 * is still valid when kvm_destroy_vm is called. 
4945 * To avoid the race between open and the removal of the debugfs 4946 * directory we test against the users count. 4947 */ 4948 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) 4949 return -ENOENT; 4950 4951 if (simple_attr_open(inode, file, get, 4952 kvm_stats_debugfs_mode(stat_data->desc) & 0222 4953 ? set : NULL, 4954 fmt)) { 4955 kvm_put_kvm(stat_data->kvm); 4956 return -ENOMEM; 4957 } 4958 4959 return 0; 4960 } 4961 4962 static int kvm_debugfs_release(struct inode *inode, struct file *file) 4963 { 4964 struct kvm_stat_data *stat_data = (struct kvm_stat_data *) 4965 inode->i_private; 4966 4967 simple_attr_release(inode, file); 4968 kvm_put_kvm(stat_data->kvm); 4969 4970 return 0; 4971 } 4972 4973 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) 4974 { 4975 *val = *(u64 *)((void *)(&kvm->stat) + offset); 4976 4977 return 0; 4978 } 4979 4980 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) 4981 { 4982 *(u64 *)((void *)(&kvm->stat) + offset) = 0; 4983 4984 return 0; 4985 } 4986 4987 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) 4988 { 4989 int i; 4990 struct kvm_vcpu *vcpu; 4991 4992 *val = 0; 4993 4994 kvm_for_each_vcpu(i, vcpu, kvm) 4995 *val += *(u64 *)((void *)(&vcpu->stat) + offset); 4996 4997 return 0; 4998 } 4999 5000 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) 5001 { 5002 int i; 5003 struct kvm_vcpu *vcpu; 5004 5005 kvm_for_each_vcpu(i, vcpu, kvm) 5006 *(u64 *)((void *)(&vcpu->stat) + offset) = 0; 5007 5008 return 0; 5009 } 5010 5011 static int kvm_stat_data_get(void *data, u64 *val) 5012 { 5013 int r = -EFAULT; 5014 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5015 5016 switch (stat_data->kind) { 5017 case KVM_STAT_VM: 5018 r = kvm_get_stat_per_vm(stat_data->kvm, 5019 stat_data->desc->desc.offset, val); 5020 break; 5021 case KVM_STAT_VCPU: 5022 r = kvm_get_stat_per_vcpu(stat_data->kvm, 5023 stat_data->desc->desc.offset, val); 5024 break; 5025 } 5026 5027 return r; 5028 } 5029 5030 static int kvm_stat_data_clear(void *data, u64 val) 5031 { 5032 int r = -EFAULT; 5033 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data; 5034 5035 if (val) 5036 return -EINVAL; 5037 5038 switch (stat_data->kind) { 5039 case KVM_STAT_VM: 5040 r = kvm_clear_stat_per_vm(stat_data->kvm, 5041 stat_data->desc->desc.offset); 5042 break; 5043 case KVM_STAT_VCPU: 5044 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 5045 stat_data->desc->desc.offset); 5046 break; 5047 } 5048 5049 return r; 5050 } 5051 5052 static int kvm_stat_data_open(struct inode *inode, struct file *file) 5053 { 5054 __simple_attr_check_format("%llu\n", 0ull); 5055 return kvm_debugfs_open(inode, file, kvm_stat_data_get, 5056 kvm_stat_data_clear, "%llu\n"); 5057 } 5058 5059 static const struct file_operations stat_fops_per_vm = { 5060 .owner = THIS_MODULE, 5061 .open = kvm_stat_data_open, 5062 .release = kvm_debugfs_release, 5063 .read = simple_attr_read, 5064 .write = simple_attr_write, 5065 .llseek = no_llseek, 5066 }; 5067 5068 static int vm_stat_get(void *_offset, u64 *val) 5069 { 5070 unsigned offset = (long)_offset; 5071 struct kvm *kvm; 5072 u64 tmp_val; 5073 5074 *val = 0; 5075 mutex_lock(&kvm_lock); 5076 list_for_each_entry(kvm, &vm_list, vm_list) { 5077 kvm_get_stat_per_vm(kvm, offset, &tmp_val); 5078 *val += tmp_val; 5079 } 5080 mutex_unlock(&kvm_lock); 5081 return 0; 5082 } 5083 5084 static int vm_stat_clear(void *_offset, u64 val) 5085 { 5086 unsigned offset = (long)_offset; 5087 struct kvm *kvm; 5088 
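/* Only a write of 0 is accepted; clearing is the sole supported operation. */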
5089 if (val) 5090 return -EINVAL; 5091 5092 mutex_lock(&kvm_lock); 5093 list_for_each_entry(kvm, &vm_list, vm_list) { 5094 kvm_clear_stat_per_vm(kvm, offset); 5095 } 5096 mutex_unlock(&kvm_lock); 5097 5098 return 0; 5099 } 5100 5101 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n"); 5102 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n"); 5103 5104 static int vcpu_stat_get(void *_offset, u64 *val) 5105 { 5106 unsigned offset = (long)_offset; 5107 struct kvm *kvm; 5108 u64 tmp_val; 5109 5110 *val = 0; 5111 mutex_lock(&kvm_lock); 5112 list_for_each_entry(kvm, &vm_list, vm_list) { 5113 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); 5114 *val += tmp_val; 5115 } 5116 mutex_unlock(&kvm_lock); 5117 return 0; 5118 } 5119 5120 static int vcpu_stat_clear(void *_offset, u64 val) 5121 { 5122 unsigned offset = (long)_offset; 5123 struct kvm *kvm; 5124 5125 if (val) 5126 return -EINVAL; 5127 5128 mutex_lock(&kvm_lock); 5129 list_for_each_entry(kvm, &vm_list, vm_list) { 5130 kvm_clear_stat_per_vcpu(kvm, offset); 5131 } 5132 mutex_unlock(&kvm_lock); 5133 5134 return 0; 5135 } 5136 5137 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, 5138 "%llu\n"); 5139 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); 5140 5141 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 5142 { 5143 struct kobj_uevent_env *env; 5144 unsigned long long created, active; 5145 5146 if (!kvm_dev.this_device || !kvm) 5147 return; 5148 5149 mutex_lock(&kvm_lock); 5150 if (type == KVM_EVENT_CREATE_VM) { 5151 kvm_createvm_count++; 5152 kvm_active_vms++; 5153 } else if (type == KVM_EVENT_DESTROY_VM) { 5154 kvm_active_vms--; 5155 } 5156 created = kvm_createvm_count; 5157 active = kvm_active_vms; 5158 mutex_unlock(&kvm_lock); 5159 5160 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT); 5161 if (!env) 5162 return; 5163 5164 add_uevent_var(env, "CREATED=%llu", created); 5165 add_uevent_var(env, "COUNT=%llu", active); 5166 5167 if (type == KVM_EVENT_CREATE_VM) { 5168 add_uevent_var(env, "EVENT=create"); 5169 kvm->userspace_pid = task_pid_nr(current); 5170 } else if (type == KVM_EVENT_DESTROY_VM) { 5171 add_uevent_var(env, "EVENT=destroy"); 5172 } 5173 add_uevent_var(env, "PID=%d", kvm->userspace_pid); 5174 5175 if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { 5176 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT); 5177 5178 if (p) { 5179 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); 5180 if (!IS_ERR(tmp)) 5181 add_uevent_var(env, "STATS_PATH=%s", tmp); 5182 kfree(p); 5183 } 5184 } 5185 /* no need for checks, since we are adding at most only 5 keys */ 5186 env->envp[env->envp_idx++] = NULL; 5187 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 5188 kfree(env); 5189 } 5190 5191 static void kvm_init_debug(void) 5192 { 5193 const struct file_operations *fops; 5194 const struct _kvm_stats_desc *pdesc; 5195 int i; 5196 5197 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 5198 5199 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) { 5200 pdesc = &kvm_vm_stats_desc[i]; 5201 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5202 fops = &vm_stat_fops; 5203 else 5204 fops = &vm_stat_readonly_fops; 5205 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5206 kvm_debugfs_dir, 5207 (void *)(long)pdesc->desc.offset, fops); 5208 } 5209 5210 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { 5211 pdesc = &kvm_vcpu_stats_desc[i]; 5212 if (kvm_stats_debugfs_mode(pdesc) & 0222) 5213 fops = 
&vcpu_stat_fops; 5214 else 5215 fops = &vcpu_stat_readonly_fops; 5216 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 5217 kvm_debugfs_dir, 5218 (void *)(long)pdesc->desc.offset, fops); 5219 } 5220 } 5221 5222 static int kvm_suspend(void) 5223 { 5224 if (kvm_usage_count) 5225 hardware_disable_nolock(NULL); 5226 return 0; 5227 } 5228 5229 static void kvm_resume(void) 5230 { 5231 if (kvm_usage_count) { 5232 #ifdef CONFIG_LOCKDEP 5233 WARN_ON(lockdep_is_held(&kvm_count_lock)); 5234 #endif 5235 hardware_enable_nolock(NULL); 5236 } 5237 } 5238 5239 static struct syscore_ops kvm_syscore_ops = { 5240 .suspend = kvm_suspend, 5241 .resume = kvm_resume, 5242 }; 5243 5244 static inline 5245 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 5246 { 5247 return container_of(pn, struct kvm_vcpu, preempt_notifier); 5248 } 5249 5250 static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 5251 { 5252 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5253 5254 WRITE_ONCE(vcpu->preempted, false); 5255 WRITE_ONCE(vcpu->ready, false); 5256 5257 __this_cpu_write(kvm_running_vcpu, vcpu); 5258 kvm_arch_sched_in(vcpu, cpu); 5259 kvm_arch_vcpu_load(vcpu, cpu); 5260 } 5261 5262 static void kvm_sched_out(struct preempt_notifier *pn, 5263 struct task_struct *next) 5264 { 5265 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 5266 5267 if (current->on_rq) { 5268 WRITE_ONCE(vcpu->preempted, true); 5269 WRITE_ONCE(vcpu->ready, true); 5270 } 5271 kvm_arch_vcpu_put(vcpu); 5272 __this_cpu_write(kvm_running_vcpu, NULL); 5273 } 5274 5275 /** 5276 * kvm_get_running_vcpu - get the vcpu running on the current CPU. 5277 * 5278 * We can disable preemption locally around accessing the per-CPU variable, 5279 * and use the resolved vcpu pointer after enabling preemption again, 5280 * because even if the current thread is migrated to another CPU, reading 5281 * the per-CPU value later will give us the same value as we update the 5282 * per-CPU variable in the preempt notifier handlers. 5283 */ 5284 struct kvm_vcpu *kvm_get_running_vcpu(void) 5285 { 5286 struct kvm_vcpu *vcpu; 5287 5288 preempt_disable(); 5289 vcpu = __this_cpu_read(kvm_running_vcpu); 5290 preempt_enable(); 5291 5292 return vcpu; 5293 } 5294 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu); 5295 5296 /** 5297 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus. 5298 */ 5299 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) 5300 { 5301 return &kvm_running_vcpu; 5302 } 5303 5304 struct kvm_cpu_compat_check { 5305 void *opaque; 5306 int *ret; 5307 }; 5308 5309 static void check_processor_compat(void *data) 5310 { 5311 struct kvm_cpu_compat_check *c = data; 5312 5313 *c->ret = kvm_arch_check_processor_compat(c->opaque); 5314 } 5315 5316 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 5317 struct module *module) 5318 { 5319 struct kvm_cpu_compat_check c; 5320 int r; 5321 int cpu; 5322 5323 r = kvm_arch_init(opaque); 5324 if (r) 5325 goto out_fail; 5326 5327 /* 5328 * kvm_arch_init makes sure there's at most one caller 5329 * for architectures that support multiple implementations, 5330 * like Intel and AMD on x86. 5331 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 5332 * conflicts in case kvm is already set up for another implementation.
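* (On x86, for instance, loading kvm_amd while kvm_intel is already active
* makes kvm_arch_init() fail with -EEXIST instead of clobbering the other
* implementation's state.)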
5333 */ 5334 r = kvm_irqfd_init(); 5335 if (r) 5336 goto out_irqfd; 5337 5338 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 5339 r = -ENOMEM; 5340 goto out_free_0; 5341 } 5342 5343 r = kvm_arch_hardware_setup(opaque); 5344 if (r < 0) 5345 goto out_free_1; 5346 5347 c.ret = &r; 5348 c.opaque = opaque; 5349 for_each_online_cpu(cpu) { 5350 smp_call_function_single(cpu, check_processor_compat, &c, 1); 5351 if (r < 0) 5352 goto out_free_2; 5353 } 5354 5355 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting", 5356 kvm_starting_cpu, kvm_dying_cpu); 5357 if (r) 5358 goto out_free_2; 5359 register_reboot_notifier(&kvm_reboot_notifier); 5360 5361 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 5362 if (!vcpu_align) 5363 vcpu_align = __alignof__(struct kvm_vcpu); 5364 kvm_vcpu_cache = 5365 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, 5366 SLAB_ACCOUNT, 5367 offsetof(struct kvm_vcpu, arch), 5368 offsetofend(struct kvm_vcpu, stats_id) 5369 - offsetof(struct kvm_vcpu, arch), 5370 NULL); 5371 if (!kvm_vcpu_cache) { 5372 r = -ENOMEM; 5373 goto out_free_3; 5374 } 5375 5376 r = kvm_async_pf_init(); 5377 if (r) 5378 goto out_free; 5379 5380 kvm_chardev_ops.owner = module; 5381 kvm_vm_fops.owner = module; 5382 kvm_vcpu_fops.owner = module; 5383 5384 r = misc_register(&kvm_dev); 5385 if (r) { 5386 pr_err("kvm: misc device register failed\n"); 5387 goto out_unreg; 5388 } 5389 5390 register_syscore_ops(&kvm_syscore_ops); 5391 5392 kvm_preempt_ops.sched_in = kvm_sched_in; 5393 kvm_preempt_ops.sched_out = kvm_sched_out; 5394 5395 kvm_init_debug(); 5396 5397 r = kvm_vfio_ops_init(); 5398 WARN_ON(r); 5399 5400 return 0; 5401 5402 out_unreg: 5403 kvm_async_pf_deinit(); 5404 out_free: 5405 kmem_cache_destroy(kvm_vcpu_cache); 5406 out_free_3: 5407 unregister_reboot_notifier(&kvm_reboot_notifier); 5408 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5409 out_free_2: 5410 kvm_arch_hardware_unsetup(); 5411 out_free_1: 5412 free_cpumask_var(cpus_hardware_enabled); 5413 out_free_0: 5414 kvm_irqfd_exit(); 5415 out_irqfd: 5416 kvm_arch_exit(); 5417 out_fail: 5418 return r; 5419 } 5420 EXPORT_SYMBOL_GPL(kvm_init); 5421 5422 void kvm_exit(void) 5423 { 5424 debugfs_remove_recursive(kvm_debugfs_dir); 5425 misc_deregister(&kvm_dev); 5426 kmem_cache_destroy(kvm_vcpu_cache); 5427 kvm_async_pf_deinit(); 5428 unregister_syscore_ops(&kvm_syscore_ops); 5429 unregister_reboot_notifier(&kvm_reboot_notifier); 5430 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); 5431 on_each_cpu(hardware_disable_nolock, NULL, 1); 5432 kvm_arch_hardware_unsetup(); 5433 kvm_arch_exit(); 5434 kvm_irqfd_exit(); 5435 free_cpumask_var(cpus_hardware_enabled); 5436 kvm_vfio_ops_exit(); 5437 } 5438 EXPORT_SYMBOL_GPL(kvm_exit); 5439 5440 struct kvm_vm_worker_thread_context { 5441 struct kvm *kvm; 5442 struct task_struct *parent; 5443 struct completion init_done; 5444 kvm_vm_thread_fn_t thread_fn; 5445 uintptr_t data; 5446 int err; 5447 }; 5448 5449 static int kvm_vm_worker_thread(void *context) 5450 { 5451 /* 5452 * The init_context is allocated on the stack of the parent thread, so 5453 * we have to locally copy anything that is needed beyond initialization 5454 */ 5455 struct kvm_vm_worker_thread_context *init_context = context; 5456 struct kvm *kvm = init_context->kvm; 5457 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn; 5458 uintptr_t data = init_context->data; 5459 int err; 5460 5461 err = kthread_park(current); 5462 /* kthread_park(current) is never supposed to return an 
error */ 5463 WARN_ON(err != 0); 5464 if (err) 5465 goto init_complete; 5466 5467 err = cgroup_attach_task_all(init_context->parent, current); 5468 if (err) { 5469 kvm_err("%s: cgroup_attach_task_all failed with err %d\n", 5470 __func__, err); 5471 goto init_complete; 5472 } 5473 5474 set_user_nice(current, task_nice(init_context->parent)); 5475 5476 init_complete: 5477 init_context->err = err; 5478 complete(&init_context->init_done); 5479 init_context = NULL; 5480 5481 if (err) 5482 return err; 5483 5484 /* Wait to be woken up by the spawner before proceeding. */ 5485 kthread_parkme(); 5486 5487 if (!kthread_should_stop()) 5488 err = thread_fn(kvm, data); 5489 5490 return err; 5491 } 5492 5493 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, 5494 uintptr_t data, const char *name, 5495 struct task_struct **thread_ptr) 5496 { 5497 struct kvm_vm_worker_thread_context init_context = {}; 5498 struct task_struct *thread; 5499 5500 *thread_ptr = NULL; 5501 init_context.kvm = kvm; 5502 init_context.parent = current; 5503 init_context.thread_fn = thread_fn; 5504 init_context.data = data; 5505 init_completion(&init_context.init_done); 5506 5507 thread = kthread_run(kvm_vm_worker_thread, &init_context, 5508 "%s-%d", name, task_pid_nr(current)); 5509 if (IS_ERR(thread)) 5510 return PTR_ERR(thread); 5511 5512 /* kthread_run is never supposed to return NULL */ 5513 WARN_ON(thread == NULL); 5514 5515 wait_for_completion(&init_context.init_done); 5516 5517 if (!init_context.err) 5518 *thread_ptr = thread; 5519 5520 return init_context.err; 5521 } 5522
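
/*
 * Illustrative usage (hypothetical caller, not part of this file): the worker
 * is created parked and must be unparked by its spawner before thread_fn runs,
 * e.g.:
 *
 *	err = kvm_vm_create_worker_thread(kvm, recovery_fn, 0,
 *					  "kvm-recovery", &thread);
 *	if (!err)
 *		kthread_unpark(thread);
 *
 * where recovery_fn is a caller-supplied kvm_vm_thread_fn_t.
 */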