1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine (KVM) Hypervisor
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
7 *
8 * Authors:
9 * Avi Kivity <avi@qumranet.com>
10 * Yaniv Kamay <yaniv@qumranet.com>
11 */
12
13 #include <kvm/iodev.h>
14
15 #include <linux/kvm_host.h>
16 #include <linux/kvm.h>
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/percpu.h>
20 #include <linux/mm.h>
21 #include <linux/miscdevice.h>
22 #include <linux/vmalloc.h>
23 #include <linux/reboot.h>
24 #include <linux/debugfs.h>
25 #include <linux/highmem.h>
26 #include <linux/file.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/cpu.h>
29 #include <linux/sched/signal.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/stat.h>
32 #include <linux/cpumask.h>
33 #include <linux/smp.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/profile.h>
36 #include <linux/kvm_para.h>
37 #include <linux/pagemap.h>
38 #include <linux/mman.h>
39 #include <linux/swap.h>
40 #include <linux/bitops.h>
41 #include <linux/spinlock.h>
42 #include <linux/compat.h>
43 #include <linux/srcu.h>
44 #include <linux/slab.h>
45 #include <linux/sort.h>
46 #include <linux/bsearch.h>
47 #include <linux/io.h>
48 #include <linux/lockdep.h>
49 #include <linux/kthread.h>
50 #include <linux/suspend.h>
51 #include <linux/rseq.h>
52
53 #include <asm/processor.h>
54 #include <asm/ioctl.h>
55 #include <linux/uaccess.h>
56
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59 #include "kvm_mm.h"
60 #include "vfio.h"
61
62 #include <trace/events/ipi.h>
63
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/kvm.h>
66
67 #include <linux/kvm_dirty_ring.h>
68
69
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72
73 MODULE_AUTHOR("Qumranet");
74 MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
75 MODULE_LICENSE("GPL");
76
77 /* Architectures should define their poll value according to the halt latency */
78 unsigned int __read_mostly halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
79 module_param(halt_poll_ns, uint, 0644);
80 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
81
82 /* Default doubles per-vcpu halt_poll_ns. */
83 unsigned int __read_mostly halt_poll_ns_grow = 2;
84 module_param(halt_poll_ns_grow, uint, 0644);
85 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
86
87 /* The start value to grow halt_poll_ns from */
88 unsigned int __read_mostly halt_poll_ns_grow_start = 10000; /* 10us */
89 module_param(halt_poll_ns_grow_start, uint, 0644);
90 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
91
92 /* Default halves per-vcpu halt_poll_ns. */
93 unsigned int __read_mostly halt_poll_ns_shrink = 2;
94 module_param(halt_poll_ns_shrink, uint, 0644);
95 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
96
97 /*
98 * Allow direct access (from KVM or the CPU) without MMU notifier protection
99 * to unpinned pages.
100 */
101 static bool __ro_after_init allow_unsafe_mappings;
102 module_param(allow_unsafe_mappings, bool, 0444);
103
104 /*
105 * Ordering of locks:
106 *
107 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
108 */
109
110 DEFINE_MUTEX(kvm_lock);
111 LIST_HEAD(vm_list);
112
113 static struct kmem_cache *kvm_vcpu_cache;
114
115 static __read_mostly struct preempt_ops kvm_preempt_ops;
116 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
117
118 static struct dentry *kvm_debugfs_dir;
119
120 static const struct file_operations stat_fops_per_vm;
121
122 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
123 unsigned long arg);
124 #ifdef CONFIG_KVM_COMPAT
125 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
126 unsigned long arg);
127 #define KVM_COMPAT(c) .compat_ioctl = (c)
128 #else
129 /*
130 * For architectures that don't implement a compat infrastructure,
131 * adopt a double line of defense:
132 * - Prevent a compat task from opening /dev/kvm
133 * - If the open has been done by a 64bit task, and the KVM fd
134 * passed to a compat task, let the ioctls fail.
135 */
136 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
137 unsigned long arg) { return -EINVAL; }
138
139 static int kvm_no_compat_open(struct inode *inode, struct file *file)
140 {
141 return is_compat_task() ? -ENODEV : 0;
142 }
143 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
144 .open = kvm_no_compat_open
145 #endif
146
147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
148
149 #define KVM_EVENT_CREATE_VM 0
150 #define KVM_EVENT_DESTROY_VM 1
151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
152 static unsigned long long kvm_createvm_count;
153 static unsigned long long kvm_active_vms;
154
155 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
156
157 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
158 {
159 }
160
161 /*
162 * Switches to the specified vcpu, until a matching vcpu_put()
163 */
164 void vcpu_load(struct kvm_vcpu *vcpu)
165 {
166 int cpu = get_cpu();
167
168 __this_cpu_write(kvm_running_vcpu, vcpu);
169 preempt_notifier_register(&vcpu->preempt_notifier);
170 kvm_arch_vcpu_load(vcpu, cpu);
171 put_cpu();
172 }
173 EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);
174
175 void vcpu_put(struct kvm_vcpu *vcpu)
176 {
177 preempt_disable();
178 kvm_arch_vcpu_put(vcpu);
179 preempt_notifier_unregister(&vcpu->preempt_notifier);
180 __this_cpu_write(kvm_running_vcpu, NULL);
181 preempt_enable();
182 }
183 EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_put);
184
185 /* TODO: merge with kvm_arch_vcpu_should_kick */
186 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
187 {
188 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
189
190 /*
191 * We need to wait for the VCPU to reenable interrupts and get out of
192 * READING_SHADOW_PAGE_TABLES mode.
193 */
194 if (req & KVM_REQUEST_WAIT)
195 return mode != OUTSIDE_GUEST_MODE;
196
197 /*
198 * Need to kick a running VCPU, but otherwise there is nothing to do.
199 */
200 return mode == IN_GUEST_MODE;
201 }
202
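/*
 * The IPI callback is intentionally empty: the interrupt itself is what
 * forces the target vCPU out of guest mode (and, when the caller asks to
 * wait, completion of the callback guarantees the CPU took the IPI).
 */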
203 static void ack_kick(void *_completed)
204 {
205 }
206
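/*
 * Kick (and optionally wait for) every CPU in @cpus.  Returns false if the
 * mask is empty, i.e. if no IPIs were sent.
 */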
207 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
208 {
209 if (cpumask_empty(cpus))
210 return false;
211
212 smp_call_function_many(cpus, ack_kick, NULL, wait);
213 return true;
214 }
215
216 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
217 struct cpumask *tmp, int current_cpu)
218 {
219 int cpu;
220
221 if (likely(!(req & KVM_REQUEST_NO_ACTION)))
222 __kvm_make_request(req, vcpu);
223
224 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
225 return;
226
227 /*
228 * Note, the vCPU could get migrated to a different pCPU at any point
229 * after kvm_request_needs_ipi(), which could result in sending an IPI
230 * to the previous pCPU. But, that's OK because the purpose of the IPI
231 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
232 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
233 * after this point is also OK, as the requirement is only that KVM wait
234 * for vCPUs that were reading SPTEs _before_ any changes were
235 * finalized. See kvm_vcpu_kick() for more details on handling requests.
236 */
237 if (kvm_request_needs_ipi(vcpu, req)) {
238 cpu = READ_ONCE(vcpu->cpu);
239 if (cpu != -1 && cpu != current_cpu)
240 __cpumask_set_cpu(cpu, tmp);
241 }
242 }
243
244 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
245 unsigned long *vcpu_bitmap)
246 {
247 struct kvm_vcpu *vcpu;
248 struct cpumask *cpus;
249 int i, me;
250 bool called;
251
252 me = get_cpu();
253
254 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
255 cpumask_clear(cpus);
256
257 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
258 vcpu = kvm_get_vcpu(kvm, i);
259 if (!vcpu)
260 continue;
261 kvm_make_vcpu_request(vcpu, req, cpus, me);
262 }
263
264 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
265 put_cpu();
266
267 return called;
268 }
269
270 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
271 {
272 struct kvm_vcpu *vcpu;
273 struct cpumask *cpus;
274 unsigned long i;
275 bool called;
276 int me;
277
278 me = get_cpu();
279
280 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
281 cpumask_clear(cpus);
282
283 kvm_for_each_vcpu(i, vcpu, kvm)
284 kvm_make_vcpu_request(vcpu, req, cpus, me);
285
286 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
287 put_cpu();
288
289 return called;
290 }
291 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_make_all_cpus_request);
292
293 void kvm_flush_remote_tlbs(struct kvm *kvm)
294 {
295 ++kvm->stat.generic.remote_tlb_flush_requests;
296
297 /*
298 * We want to publish modifications to the page tables before reading
299 * mode. Pairs with a memory barrier in arch-specific code.
300 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
301 * and smp_mb in walk_shadow_page_lockless_begin/end.
302 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
303 *
304 * There is already an smp_mb__after_atomic() before
305 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
306 * barrier here.
307 */
308 if (!kvm_arch_flush_remote_tlbs(kvm)
309 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
310 ++kvm->stat.generic.remote_tlb_flush;
311 }
312 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_remote_tlbs);
313
314 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
315 {
316 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
317 return;
318
319 /*
320 * Fall back to flushing the entire TLB if the architecture's range-based
321 * TLB invalidation is unsupported or can't be performed for whatever
322 * reason.
323 */
324 kvm_flush_remote_tlbs(kvm);
325 }
326
327 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
328 const struct kvm_memory_slot *memslot)
329 {
330 /*
331 * All current use cases for flushing the TLBs for a specific memslot
332 * are related to dirty logging, and many do the TLB flush out of
333 * mmu_lock. The interaction between the various operations on memslot
334 * must be serialized by slots_lock to ensure the TLB flush from one
335 * operation is observed by any other operation on the same memslot.
336 */
337 lockdep_assert_held(&kvm->slots_lock);
338 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
339 }
340
341 static void kvm_flush_shadow_all(struct kvm *kvm)
342 {
343 kvm_arch_flush_shadow_all(kvm);
344 kvm_arch_guest_memory_reclaimed(kvm);
345 }
346
347 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
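/*
 * Allocate a single object for an MMU memory cache, either from the cache's
 * dedicated kmem_cache or as a raw page, honoring the cache's zeroing and
 * init-value settings.
 */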
348 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
349 gfp_t gfp_flags)
350 {
351 void *page;
352
353 gfp_flags |= mc->gfp_zero;
354
355 if (mc->kmem_cache)
356 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
357
358 page = (void *)__get_free_page(gfp_flags);
359 if (page && mc->init_value)
360 memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
361 return page;
362 }
363
364 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
365 {
366 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
367 void *obj;
368
369 if (mc->nobjs >= min)
370 return 0;
371
372 if (unlikely(!mc->objects)) {
373 if (WARN_ON_ONCE(!capacity))
374 return -EIO;
375
376 /*
377 * Custom init values can be used only for page allocations,
378 * and obviously conflict with __GFP_ZERO.
379 */
380 if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
381 return -EIO;
382
383 mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
384 if (!mc->objects)
385 return -ENOMEM;
386
387 mc->capacity = capacity;
388 }
389
390 /* It is illegal to request a different capacity across topups. */
391 if (WARN_ON_ONCE(mc->capacity != capacity))
392 return -EIO;
393
394 while (mc->nobjs < mc->capacity) {
395 obj = mmu_memory_cache_alloc_obj(mc, gfp);
396 if (!obj)
397 return mc->nobjs >= min ? 0 : -ENOMEM;
398 mc->objects[mc->nobjs++] = obj;
399 }
400 return 0;
401 }
402
403 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
404 {
405 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
406 }
407
408 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
409 {
410 return mc->nobjs;
411 }
412
413 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
414 {
415 while (mc->nobjs) {
416 if (mc->kmem_cache)
417 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
418 else
419 free_page((unsigned long)mc->objects[--mc->nobjs]);
420 }
421
422 kvfree(mc->objects);
423
424 mc->objects = NULL;
425 mc->capacity = 0;
426 }
427
428 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
429 {
430 void *p;
431
432 if (WARN_ON(!mc->nobjs))
433 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
434 else
435 p = mc->objects[--mc->nobjs];
436 BUG_ON(!p);
437 return p;
438 }
439 #endif
440
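/*
 * Initialize the architecture-neutral fields of a newly allocated vCPU;
 * arch-specific state is set up separately.
 */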
441 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
442 {
443 mutex_init(&vcpu->mutex);
444 vcpu->cpu = -1;
445 vcpu->kvm = kvm;
446 vcpu->vcpu_id = id;
447 vcpu->pid = NULL;
448 rwlock_init(&vcpu->pid_lock);
449 #ifndef __KVM_HAVE_ARCH_WQP
450 rcuwait_init(&vcpu->wait);
451 #endif
452 kvm_async_pf_vcpu_init(vcpu);
453
454 kvm_vcpu_set_in_spin_loop(vcpu, false);
455 kvm_vcpu_set_dy_eligible(vcpu, false);
456 vcpu->preempted = false;
457 vcpu->ready = false;
458 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
459 vcpu->last_used_slot = NULL;
460
461 /* Fill the stats id string for the vcpu */
462 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
463 task_pid_nr(current), id);
464 }
465
466 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
467 {
468 kvm_arch_vcpu_destroy(vcpu);
469 kvm_dirty_ring_free(&vcpu->dirty_ring);
470
471 /*
472 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
473 * the vcpu->pid pointer, and at destruction time all file descriptors
474 * are already gone.
475 */
476 put_pid(vcpu->pid);
477
478 free_page((unsigned long)vcpu->run);
479 kmem_cache_free(kvm_vcpu_cache, vcpu);
480 }
481
482 void kvm_destroy_vcpus(struct kvm *kvm)
483 {
484 unsigned long i;
485 struct kvm_vcpu *vcpu;
486
487 kvm_for_each_vcpu(i, vcpu, kvm) {
488 kvm_vcpu_destroy(vcpu);
489 xa_erase(&kvm->vcpu_array, i);
490
491 /*
492 * Assert that the vCPU isn't visible in any way, to ensure KVM
493 * doesn't trigger a use-after-free if destroying vCPUs results
494 * in VM-wide request, e.g. to flush remote TLBs when tearing
495 * down MMUs, or to mark the VM dead if a KVM_BUG_ON() fires.
496 */
497 WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i));
498 }
499
500 atomic_set(&kvm->online_vcpus, 0);
501 }
502 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);
503
504 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
505 {
506 return container_of(mn, struct kvm, mmu_notifier);
507 }
508
509 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
510
511 typedef void (*on_lock_fn_t)(struct kvm *kvm);
512
513 struct kvm_mmu_notifier_range {
514 /*
515 * 64-bit addresses, as KVM notifiers can operate on host virtual
516 * addresses (unsigned long) and guest physical addresses (64-bit).
517 */
518 u64 start;
519 u64 end;
520 union kvm_mmu_notifier_arg arg;
521 gfn_handler_t handler;
522 on_lock_fn_t on_lock;
523 bool flush_on_ret;
524 bool may_block;
525 bool lockless;
526 };
527
528 /*
529 * The inner-most helper returns a tuple containing the return value from the
530 * arch- and action-specific handler, plus a flag indicating whether or not at
531 * least one memslot was found, i.e. if the handler found guest memory.
532 *
533 * Note, most notifiers are averse to booleans, so even though KVM tracks the
534 * return from arch code as a bool, outer helpers will cast it to an int. :-(
535 */
536 typedef struct kvm_mmu_notifier_return {
537 bool ret;
538 bool found_memslot;
539 } kvm_mn_ret_t;
540
541 /*
542 * Use a dedicated stub instead of NULL to indicate that there is no callback
543 * function/handler. The compiler technically can't guarantee that a real
544 * function will have a non-zero address, and so it will generate code to
545 * check for !NULL, whereas comparing against a stub will be elided at compile
546 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
547 */
548 static void kvm_null_fn(void)
549 {
550
551 }
552 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
553
554 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
555 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
556 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
557 node; \
558 node = interval_tree_iter_next(node, start, last)) \
559
560 static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
561 const struct kvm_mmu_notifier_range *range)
562 {
563 struct kvm_mmu_notifier_return r = {
564 .ret = false,
565 .found_memslot = false,
566 };
567 struct kvm_gfn_range gfn_range;
568 struct kvm_memory_slot *slot;
569 struct kvm_memslots *slots;
570 int i, idx;
571
572 if (WARN_ON_ONCE(range->end <= range->start))
573 return r;
574
575 /* A null handler is allowed if and only if on_lock() is provided. */
576 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
577 IS_KVM_NULL_FN(range->handler)))
578 return r;
579
580 /* on_lock will never be called for lockless walks */
581 if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
582 return r;
583
584 idx = srcu_read_lock(&kvm->srcu);
585
586 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
587 struct interval_tree_node *node;
588
589 slots = __kvm_memslots(kvm, i);
590 kvm_for_each_memslot_in_hva_range(node, slots,
591 range->start, range->end - 1) {
592 unsigned long hva_start, hva_end;
593
594 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
595 hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
596 hva_end = min_t(unsigned long, range->end,
597 slot->userspace_addr + (slot->npages << PAGE_SHIFT));
598
599 /*
600 * To optimize for the likely case where the address
601 * range is covered by zero or one memslots, don't
602 * bother making these conditional (to avoid writes on
603 * the second or later invocation of the handler).
604 */
605 gfn_range.arg = range->arg;
606 gfn_range.may_block = range->may_block;
607 /*
608 * HVA-based notifications aren't relevant to private
609 * mappings as they don't have a userspace mapping.
610 */
611 gfn_range.attr_filter = KVM_FILTER_SHARED;
612
613 /*
614 * {gfn(page) | page intersects with [hva_start, hva_end)} =
615 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
616 */
617 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
618 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
619 gfn_range.slot = slot;
620 gfn_range.lockless = range->lockless;
621
622 if (!r.found_memslot) {
623 r.found_memslot = true;
624 if (!range->lockless) {
625 KVM_MMU_LOCK(kvm);
626 if (!IS_KVM_NULL_FN(range->on_lock))
627 range->on_lock(kvm);
628
629 if (IS_KVM_NULL_FN(range->handler))
630 goto mmu_unlock;
631 }
632 }
633 r.ret |= range->handler(kvm, &gfn_range);
634 }
635 }
636
637 if (range->flush_on_ret && r.ret)
638 kvm_flush_remote_tlbs(kvm);
639
640 mmu_unlock:
641 if (r.found_memslot && !range->lockless)
642 KVM_MMU_UNLOCK(kvm);
643
644 srcu_read_unlock(&kvm->srcu, idx);
645
646 return r;
647 }
648
649 static __always_inline bool kvm_age_hva_range(struct mmu_notifier *mn,
650 unsigned long start, unsigned long end, gfn_handler_t handler,
651 bool flush_on_ret)
652 {
653 struct kvm *kvm = mmu_notifier_to_kvm(mn);
654 const struct kvm_mmu_notifier_range range = {
655 .start = start,
656 .end = end,
657 .handler = handler,
658 .on_lock = (void *)kvm_null_fn,
659 .flush_on_ret = flush_on_ret,
660 .may_block = false,
661 .lockless = IS_ENABLED(CONFIG_KVM_MMU_LOCKLESS_AGING),
662 };
663
664 return kvm_handle_hva_range(kvm, &range).ret;
665 }
666
667 static __always_inline bool kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
668 unsigned long start, unsigned long end, gfn_handler_t handler)
669 {
670 return kvm_age_hva_range(mn, start, end, handler, false);
671 }
672
673 void kvm_mmu_invalidate_begin(struct kvm *kvm)
674 {
675 lockdep_assert_held_write(&kvm->mmu_lock);
676 /*
677 * The count increase must become visible at unlock time as no
678 * spte can be established without taking the mmu_lock and
679 * count is also read inside the mmu_lock critical section.
680 */
681 kvm->mmu_invalidate_in_progress++;
682
683 if (likely(kvm->mmu_invalidate_in_progress == 1)) {
684 kvm->mmu_invalidate_range_start = INVALID_GPA;
685 kvm->mmu_invalidate_range_end = INVALID_GPA;
686 }
687 }
688
689 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
690 {
691 lockdep_assert_held_write(&kvm->mmu_lock);
692
693 WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
694
695 if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
696 kvm->mmu_invalidate_range_start = start;
697 kvm->mmu_invalidate_range_end = end;
698 } else {
699 /*
700 * Fully tracking multiple concurrent ranges has diminishing
701 * returns. Keep things simple and just find the minimal range
702 * which includes the current and new ranges. As there won't be
703 * enough information to subtract a range after its invalidate
704 * completes, any ranges invalidated concurrently will
705 * accumulate and persist until all outstanding invalidates
706 * complete.
707 */
708 kvm->mmu_invalidate_range_start =
709 min(kvm->mmu_invalidate_range_start, start);
710 kvm->mmu_invalidate_range_end =
711 max(kvm->mmu_invalidate_range_end, end);
712 }
713 }
714
715 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
716 {
717 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
718 return kvm_unmap_gfn_range(kvm, range);
719 }
720
721 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
722 const struct mmu_notifier_range *range)
723 {
724 struct kvm *kvm = mmu_notifier_to_kvm(mn);
725 const struct kvm_mmu_notifier_range hva_range = {
726 .start = range->start,
727 .end = range->end,
728 .handler = kvm_mmu_unmap_gfn_range,
729 .on_lock = kvm_mmu_invalidate_begin,
730 .flush_on_ret = true,
731 .may_block = mmu_notifier_range_blockable(range),
732 };
733
734 trace_kvm_unmap_hva_range(range->start, range->end);
735
736 /*
737 * Prevent memslot modification between range_start() and range_end()
738 * so that conditionally locking provides the same result in both
739 * functions. Without that guarantee, the mmu_invalidate_in_progress
740 * adjustments will be imbalanced.
741 *
742 * Pairs with the decrement in range_end().
743 */
744 spin_lock(&kvm->mn_invalidate_lock);
745 kvm->mn_active_invalidate_count++;
746 spin_unlock(&kvm->mn_invalidate_lock);
747
748 /*
749 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
750 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
751 * each cache's lock. There are relatively few caches in existence at
752 * any given time, and the caches themselves can check for hva overlap,
753 * i.e. don't need to rely on memslot overlap checks for performance.
754 * Because this runs without holding mmu_lock, the pfn caches must use
755 * mn_active_invalidate_count (see above) instead of
756 * mmu_invalidate_in_progress.
757 */
758 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
759
760 /*
761 * If one or more memslots were found and thus zapped, notify arch code
762 * that guest memory has been reclaimed. This needs to be done *after*
763 * dropping mmu_lock, as x86's reclaim path is slooooow.
764 */
765 if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
766 kvm_arch_guest_memory_reclaimed(kvm);
767
768 return 0;
769 }
770
771 void kvm_mmu_invalidate_end(struct kvm *kvm)
772 {
773 lockdep_assert_held_write(&kvm->mmu_lock);
774
775 /*
776 * This sequence increase will notify the kvm page fault that
777 * the page that is going to be mapped in the spte could have
778 * been freed.
779 */
780 kvm->mmu_invalidate_seq++;
781 smp_wmb();
782 /*
783 * The above sequence increase must be visible before the
784 * below count decrease, which is ensured by the smp_wmb above
785 * in conjunction with the smp_rmb in mmu_invalidate_retry().
786 */
787 kvm->mmu_invalidate_in_progress--;
788 KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
789
790 /*
791 * Assert that at least one range was added between start() and end().
792 * Not adding a range isn't fatal, but it is a KVM bug.
793 */
794 WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
795 }
796
797 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
798 const struct mmu_notifier_range *range)
799 {
800 struct kvm *kvm = mmu_notifier_to_kvm(mn);
801 const struct kvm_mmu_notifier_range hva_range = {
802 .start = range->start,
803 .end = range->end,
804 .handler = (void *)kvm_null_fn,
805 .on_lock = kvm_mmu_invalidate_end,
806 .flush_on_ret = false,
807 .may_block = mmu_notifier_range_blockable(range),
808 };
809 bool wake;
810
811 kvm_handle_hva_range(kvm, &hva_range);
812
813 /* Pairs with the increment in range_start(). */
814 spin_lock(&kvm->mn_invalidate_lock);
815 if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
816 --kvm->mn_active_invalidate_count;
817 wake = !kvm->mn_active_invalidate_count;
818 spin_unlock(&kvm->mn_invalidate_lock);
819
820 /*
821 * There can only be one waiter, since the wait happens under
822 * slots_lock.
823 */
824 if (wake)
825 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
826 }
827
828 static bool kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
829 struct mm_struct *mm, unsigned long start, unsigned long end)
830 {
831 trace_kvm_age_hva(start, end);
832
833 return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
834 !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
835 }
836
837 static bool kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
838 struct mm_struct *mm, unsigned long start, unsigned long end)
839 {
840 trace_kvm_age_hva(start, end);
841
842 /*
843 * Even though we do not flush TLB, this will still adversely
844 * affect performance on pre-Haswell Intel EPT, where there is
845 * no EPT Access Bit to clear so that we have to tear down EPT
846 * tables instead. If we find this unacceptable, we can always
847 * add a parameter to kvm_age_hva so that it effectively doesn't
848 * do anything on clear_young.
849 *
850 * Also note that currently we never issue secondary TLB flushes
851 * from clear_young, leaving this job up to the regular system
852 * cadence. If we find this inaccurate, we might come up with a
853 * more sophisticated heuristic later.
854 */
855 return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
856 }
857
858 static bool kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
859 struct mm_struct *mm, unsigned long address)
860 {
861 trace_kvm_test_age_hva(address);
862
863 return kvm_age_hva_range_no_flush(mn, address, address + 1,
864 kvm_test_age_gfn);
865 }
866
867 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
868 struct mm_struct *mm)
869 {
870 struct kvm *kvm = mmu_notifier_to_kvm(mn);
871 int idx;
872
873 idx = srcu_read_lock(&kvm->srcu);
874 kvm_flush_shadow_all(kvm);
875 srcu_read_unlock(&kvm->srcu, idx);
876 }
877
878 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
879 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
880 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
881 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
882 .clear_young = kvm_mmu_notifier_clear_young,
883 .test_young = kvm_mmu_notifier_test_young,
884 .release = kvm_mmu_notifier_release,
885 };
886
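/*
 * Register KVM's MMU notifier with the current task's mm so that host-side
 * invalidation and aging events are propagated to KVM's secondary MMU via
 * the ops table above.
 */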
887 static int kvm_init_mmu_notifier(struct kvm *kvm)
888 {
889 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
890 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
891 }
892
893 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
894 static int kvm_pm_notifier_call(struct notifier_block *bl,
895 unsigned long state,
896 void *unused)
897 {
898 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
899
900 return kvm_arch_pm_notifier(kvm, state);
901 }
902
903 static void kvm_init_pm_notifier(struct kvm *kvm)
904 {
905 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
906 /* Suspend KVM before we suspend ftrace, RCU, etc. */
907 kvm->pm_notifier.priority = INT_MAX;
908 register_pm_notifier(&kvm->pm_notifier);
909 }
910
911 static void kvm_destroy_pm_notifier(struct kvm *kvm)
912 {
913 unregister_pm_notifier(&kvm->pm_notifier);
914 }
915 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
916 static void kvm_init_pm_notifier(struct kvm *kvm)
917 {
918 }
919
920 static void kvm_destroy_pm_notifier(struct kvm *kvm)
921 {
922 }
923 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
924
925 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
926 {
927 if (!memslot->dirty_bitmap)
928 return;
929
930 vfree(memslot->dirty_bitmap);
931 memslot->dirty_bitmap = NULL;
932 }
933
934 /* This does not remove the slot from struct kvm_memslots data structures */
935 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
936 {
937 if (slot->flags & KVM_MEM_GUEST_MEMFD)
938 kvm_gmem_unbind(slot);
939
940 kvm_destroy_dirty_bitmap(slot);
941
942 kvm_arch_free_memslot(kvm, slot);
943
944 kfree(slot);
945 }
946
947 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
948 {
949 struct hlist_node *idnode;
950 struct kvm_memory_slot *memslot;
951 int bkt;
952
953 /*
954 * The same memslot objects live in both active and inactive sets,
955 * arbitrarily free using index '1' so the second invocation of this
956 * function isn't operating over a structure with dangling pointers
957 * (even though this function isn't actually touching them).
958 */
959 if (!slots->node_idx)
960 return;
961
962 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
963 kvm_free_memslot(kvm, memslot);
964 }
965
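/*
 * Instantaneous stats are read-only in debugfs; cumulative and peak stats
 * are also writable so that userspace can clear them.
 */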
966 static umode_t kvm_stats_debugfs_mode(const struct kvm_stats_desc *desc)
967 {
968 switch (desc->flags & KVM_STATS_TYPE_MASK) {
969 case KVM_STATS_TYPE_INSTANT:
970 return 0444;
971 case KVM_STATS_TYPE_CUMULATIVE:
972 case KVM_STATS_TYPE_PEAK:
973 default:
974 return 0644;
975 }
976 }
977
978
979 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
980 {
981 int i;
982 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
983 kvm_vcpu_stats_header.num_desc;
984
985 if (IS_ERR(kvm->debugfs_dentry))
986 return;
987
988 debugfs_remove_recursive(kvm->debugfs_dentry);
989
990 if (kvm->debugfs_stat_data) {
991 for (i = 0; i < kvm_debugfs_num_entries; i++)
992 kfree(kvm->debugfs_stat_data[i]);
993 kfree(kvm->debugfs_stat_data);
994 }
995 }
996
997 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
998 {
999 static DEFINE_MUTEX(kvm_debugfs_lock);
1000 struct dentry *dent;
1001 char dir_name[ITOA_MAX_LEN * 2];
1002 struct kvm_stat_data *stat_data;
1003 const struct kvm_stats_desc *pdesc;
1004 int i, ret = -ENOMEM;
1005 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1006 kvm_vcpu_stats_header.num_desc;
1007
1008 if (!debugfs_initialized())
1009 return 0;
1010
1011 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1012 mutex_lock(&kvm_debugfs_lock);
1013 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1014 if (dent) {
1015 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1016 dput(dent);
1017 mutex_unlock(&kvm_debugfs_lock);
1018 return 0;
1019 }
1020 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1021 mutex_unlock(&kvm_debugfs_lock);
1022 if (IS_ERR(dent))
1023 return 0;
1024
1025 kvm->debugfs_dentry = dent;
1026 kvm->debugfs_stat_data = kzalloc_objs(*kvm->debugfs_stat_data,
1027 kvm_debugfs_num_entries,
1028 GFP_KERNEL_ACCOUNT);
1029 if (!kvm->debugfs_stat_data)
1030 goto out_err;
1031
1032 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1033 pdesc = &kvm_vm_stats_desc[i];
1034 stat_data = kzalloc_obj(*stat_data, GFP_KERNEL_ACCOUNT);
1035 if (!stat_data)
1036 goto out_err;
1037
1038 stat_data->kvm = kvm;
1039 stat_data->desc = pdesc;
1040 stat_data->kind = KVM_STAT_VM;
1041 kvm->debugfs_stat_data[i] = stat_data;
1042 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1043 kvm->debugfs_dentry, stat_data,
1044 &stat_fops_per_vm);
1045 }
1046
1047 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1048 pdesc = &kvm_vcpu_stats_desc[i];
1049 stat_data = kzalloc_obj(*stat_data, GFP_KERNEL_ACCOUNT);
1050 if (!stat_data)
1051 goto out_err;
1052
1053 stat_data->kvm = kvm;
1054 stat_data->desc = pdesc;
1055 stat_data->kind = KVM_STAT_VCPU;
1056 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1057 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1058 kvm->debugfs_dentry, stat_data,
1059 &stat_fops_per_vm);
1060 }
1061
1062 kvm_arch_create_vm_debugfs(kvm);
1063 return 0;
1064 out_err:
1065 kvm_destroy_vm_debugfs(kvm);
1066 return ret;
1067 }
1068
1069 /*
1070 * Called just after removing the VM from the vm_list, but before doing any
1071 * other destruction.
1072 */
1073 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1074 {
1075 }
1076
1077 /*
1078 * Called after per-vm debugfs created. When called kvm->debugfs_dentry should
1079 * be setup already, so we can create arch-specific debugfs entries under it.
1080 * Cleanup is done automatically and recursively by kvm_destroy_vm_debugfs(), so
1081 * a per-arch destroy interface is not needed.
1082 */
1083 void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1084 {
1085 }
1086
1087 /* Called only on cleanup and destruction paths when there are no users. */
1088 static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
1089 enum kvm_bus idx)
1090 {
1091 return rcu_dereference_protected(kvm->buses[idx],
1092 !refcount_read(&kvm->users_count));
1093 }
1094
1095 static int kvm_enable_virtualization(void);
1096 static void kvm_disable_virtualization(void);
1097
1098 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1099 {
1100 struct kvm *kvm = kvm_arch_alloc_vm();
1101 struct kvm_memslots *slots;
1102 int r, i, j;
1103
1104 if (!kvm)
1105 return ERR_PTR(-ENOMEM);
1106
1107 KVM_MMU_LOCK_INIT(kvm);
1108 mmgrab(current->mm);
1109 kvm->mm = current->mm;
1110 kvm_eventfd_init(kvm);
1111 mutex_init(&kvm->lock);
1112 mutex_init(&kvm->irq_lock);
1113 mutex_init(&kvm->slots_lock);
1114 mutex_init(&kvm->slots_arch_lock);
1115 spin_lock_init(&kvm->mn_invalidate_lock);
1116 rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1117 xa_init(&kvm->vcpu_array);
1118 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1119 xa_init(&kvm->mem_attr_array);
1120 #endif
1121
1122 INIT_LIST_HEAD(&kvm->gpc_list);
1123 spin_lock_init(&kvm->gpc_lock);
1124
1125 INIT_LIST_HEAD(&kvm->devices);
1126 kvm->max_vcpus = KVM_MAX_VCPUS;
1127
1128 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1129
1130 /*
1131 * Force subsequent debugfs file creations to fail if the VM directory
1132 * is not created (by kvm_create_vm_debugfs()).
1133 */
1134 kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1135
1136 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1137 task_pid_nr(current));
1138
1139 r = -ENOMEM;
1140 if (init_srcu_struct(&kvm->srcu))
1141 goto out_err_no_srcu;
1142 if (init_srcu_struct(&kvm->irq_srcu))
1143 goto out_err_no_irq_srcu;
1144
1145 r = kvm_init_irq_routing(kvm);
1146 if (r)
1147 goto out_err_no_irq_routing;
1148
1149 refcount_set(&kvm->users_count, 1);
1150
1151 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1152 for (j = 0; j < 2; j++) {
1153 slots = &kvm->__memslots[i][j];
1154
1155 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1156 slots->hva_tree = RB_ROOT_CACHED;
1157 slots->gfn_tree = RB_ROOT;
1158 hash_init(slots->id_hash);
1159 slots->node_idx = j;
1160
1161 /* Generations must be different for each address space. */
1162 slots->generation = i;
1163 }
1164
1165 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1166 }
1167
1168 r = -ENOMEM;
1169 for (i = 0; i < KVM_NR_BUSES; i++) {
1170 rcu_assign_pointer(kvm->buses[i],
1171 kzalloc_obj(struct kvm_io_bus, GFP_KERNEL_ACCOUNT));
1172 if (!kvm->buses[i])
1173 goto out_err_no_arch_destroy_vm;
1174 }
1175
1176 r = kvm_arch_init_vm(kvm, type);
1177 if (r)
1178 goto out_err_no_arch_destroy_vm;
1179
1180 r = kvm_enable_virtualization();
1181 if (r)
1182 goto out_err_no_disable;
1183
1184 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1185 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1186 #endif
1187
1188 r = kvm_init_mmu_notifier(kvm);
1189 if (r)
1190 goto out_err_no_mmu_notifier;
1191
1192 r = kvm_coalesced_mmio_init(kvm);
1193 if (r < 0)
1194 goto out_no_coalesced_mmio;
1195
1196 r = kvm_create_vm_debugfs(kvm, fdname);
1197 if (r)
1198 goto out_err_no_debugfs;
1199
1200 mutex_lock(&kvm_lock);
1201 list_add(&kvm->vm_list, &vm_list);
1202 mutex_unlock(&kvm_lock);
1203
1204 preempt_notifier_inc();
1205 kvm_init_pm_notifier(kvm);
1206
1207 return kvm;
1208
1209 out_err_no_debugfs:
1210 kvm_coalesced_mmio_free(kvm);
1211 out_no_coalesced_mmio:
1212 if (kvm->mmu_notifier.ops)
1213 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1214 out_err_no_mmu_notifier:
1215 kvm_disable_virtualization();
1216 out_err_no_disable:
1217 kvm_arch_destroy_vm(kvm);
1218 out_err_no_arch_destroy_vm:
1219 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1220 for (i = 0; i < KVM_NR_BUSES; i++)
1221 kfree(kvm_get_bus_for_destruction(kvm, i));
1222 kvm_free_irq_routing(kvm);
1223 out_err_no_irq_routing:
1224 cleanup_srcu_struct(&kvm->irq_srcu);
1225 out_err_no_irq_srcu:
1226 cleanup_srcu_struct(&kvm->srcu);
1227 out_err_no_srcu:
1228 kvm_arch_free_vm(kvm);
1229 mmdrop(current->mm);
1230 return ERR_PTR(r);
1231 }
1232
1233 static void kvm_destroy_devices(struct kvm *kvm)
1234 {
1235 struct kvm_device *dev, *tmp;
1236
1237 /*
1238 * We do not need to take the kvm->lock here, because nobody else
1239 * has a reference to the struct kvm at this point and therefore
1240 * cannot access the devices list anyhow.
1241 *
1242 * The device list is generally managed as an rculist, but list_del()
1243 * is used intentionally here. If a bug in KVM introduced a reader that
1244 * was not backed by a reference on the kvm struct, the hope is that
1245 * it'd consume the poisoned forward pointer instead of suffering a
1246 * use-after-free, even though this cannot be guaranteed.
1247 */
1248 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1249 list_del(&dev->vm_node);
1250 dev->ops->destroy(dev);
1251 }
1252 }
1253
1254 static void kvm_destroy_vm(struct kvm *kvm)
1255 {
1256 int i;
1257 struct mm_struct *mm = kvm->mm;
1258
1259 kvm_destroy_pm_notifier(kvm);
1260 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1261 kvm_destroy_vm_debugfs(kvm);
1262 mutex_lock(&kvm_lock);
1263 list_del(&kvm->vm_list);
1264 mutex_unlock(&kvm_lock);
1265 kvm_arch_pre_destroy_vm(kvm);
1266
1267 kvm_free_irq_routing(kvm);
1268 for (i = 0; i < KVM_NR_BUSES; i++) {
1269 struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
1270
1271 if (bus)
1272 kvm_io_bus_destroy(bus);
1273 kvm->buses[i] = NULL;
1274 }
1275 kvm_coalesced_mmio_free(kvm);
1276 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1277 /*
1278 * At this point, pending calls to invalidate_range_start()
1279 * have completed but no more MMU notifiers will run, so
1280 * mn_active_invalidate_count may remain unbalanced.
1281 * No threads can be waiting in kvm_swap_active_memslots() as the
1282 * last reference on KVM has been dropped, but freeing
1283 * memslots would deadlock without this manual intervention.
1284 *
1285 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1286 * notifier between a start() and end(), then there shouldn't be any
1287 * in-progress invalidations.
1288 */
1289 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1290 if (kvm->mn_active_invalidate_count)
1291 kvm->mn_active_invalidate_count = 0;
1292 else
1293 WARN_ON(kvm->mmu_invalidate_in_progress);
1294 kvm_arch_destroy_vm(kvm);
1295 kvm_destroy_devices(kvm);
1296 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1297 kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1298 kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1299 }
1300 cleanup_srcu_struct(&kvm->irq_srcu);
1301 srcu_barrier(&kvm->srcu);
1302 cleanup_srcu_struct(&kvm->srcu);
1303 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1304 xa_destroy(&kvm->mem_attr_array);
1305 #endif
1306 kvm_arch_free_vm(kvm);
1307 preempt_notifier_dec();
1308 kvm_disable_virtualization();
1309 mmdrop(mm);
1310 }
1311
1312 void kvm_get_kvm(struct kvm *kvm)
1313 {
1314 refcount_inc(&kvm->users_count);
1315 }
1316 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1317
1318 /*
1319 * Make sure the VM is not in the middle of destruction; this is a safe version of
1320 * kvm_get_kvm(). Return true if kvm referenced successfully, false otherwise.
1321 */
1322 bool kvm_get_kvm_safe(struct kvm *kvm)
1323 {
1324 return refcount_inc_not_zero(&kvm->users_count);
1325 }
1326 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
1327
1328 void kvm_put_kvm(struct kvm *kvm)
1329 {
1330 if (refcount_dec_and_test(&kvm->users_count))
1331 kvm_destroy_vm(kvm);
1332 }
1333 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1334
1335 /*
1336 * Used to put a reference that was taken on behalf of an object associated
1337 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1338 * of the new file descriptor fails and the reference cannot be transferred to
1339 * its final owner. In such cases, the caller is still actively using @kvm and
1340 * will fail miserably if the refcount unexpectedly hits zero.
1341 */
1342 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1343 {
1344 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1345 }
1346 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);
1347
1348 static int kvm_vm_release(struct inode *inode, struct file *filp)
1349 {
1350 struct kvm *kvm = filp->private_data;
1351
1352 kvm_irqfd_release(kvm);
1353
1354 kvm_put_kvm(kvm);
1355 return 0;
1356 }
1357
1358 int kvm_trylock_all_vcpus(struct kvm *kvm)
1359 {
1360 struct kvm_vcpu *vcpu;
1361 unsigned long i, j;
1362
1363 lockdep_assert_held(&kvm->lock);
1364
1365 kvm_for_each_vcpu(i, vcpu, kvm)
1366 if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
1367 goto out_unlock;
1368 return 0;
1369
1370 out_unlock:
1371 kvm_for_each_vcpu(j, vcpu, kvm) {
1372 if (i == j)
1373 break;
1374 mutex_unlock(&vcpu->mutex);
1375 }
1376 return -EINTR;
1377 }
1378 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_trylock_all_vcpus);
1379
1380 int kvm_lock_all_vcpus(struct kvm *kvm)
1381 {
1382 struct kvm_vcpu *vcpu;
1383 unsigned long i, j;
1384 int r;
1385
1386 lockdep_assert_held(&kvm->lock);
1387
1388 kvm_for_each_vcpu(i, vcpu, kvm) {
1389 r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
1390 if (r)
1391 goto out_unlock;
1392 }
1393 return 0;
1394
1395 out_unlock:
1396 kvm_for_each_vcpu(j, vcpu, kvm) {
1397 if (i == j)
1398 break;
1399 mutex_unlock(&vcpu->mutex);
1400 }
1401 return r;
1402 }
1403 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lock_all_vcpus);
1404
1405 void kvm_unlock_all_vcpus(struct kvm *kvm)
1406 {
1407 struct kvm_vcpu *vcpu;
1408 unsigned long i;
1409
1410 lockdep_assert_held(&kvm->lock);
1411
1412 kvm_for_each_vcpu(i, vcpu, kvm)
1413 mutex_unlock(&vcpu->mutex);
1414 }
1415 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_unlock_all_vcpus);
1416
1417 /*
1418 * Allocation size is twice as large as the actual dirty bitmap size.
1419 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
1420 */
1421 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1422 {
1423 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1424
1425 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1426 if (!memslot->dirty_bitmap)
1427 return -ENOMEM;
1428
1429 return 0;
1430 }
1431
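/*
 * Memslots are double-buffered, with two sets per address space; return the
 * set that is currently *not* published to readers, so it can be modified
 * and then swapped in.
 */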
1432 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1433 {
1434 struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1435 int node_idx_inactive = active->node_idx ^ 1;
1436
1437 return &kvm->__memslots[as_id][node_idx_inactive];
1438 }
1439
1440 /*
1441 * Helper to get the address space ID when one of memslot pointers may be NULL.
1442 * This also serves as a sanity check that at least one of the pointers is non-NULL,
1443 * and that their address space IDs don't diverge.
1444 */
1445 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1446 struct kvm_memory_slot *b)
1447 {
1448 if (WARN_ON_ONCE(!a && !b))
1449 return 0;
1450
1451 if (!a)
1452 return b->as_id;
1453 if (!b)
1454 return a->as_id;
1455
1456 WARN_ON_ONCE(a->as_id != b->as_id);
1457 return a->as_id;
1458 }
1459
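/*
 * Insert @slot into the gfn-ordered rb-tree of the given memslot set.  Two
 * slots with the same base_gfn are a caller bug and trigger BUG().
 */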
1460 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1461 struct kvm_memory_slot *slot)
1462 {
1463 struct rb_root *gfn_tree = &slots->gfn_tree;
1464 struct rb_node **node, *parent;
1465 int idx = slots->node_idx;
1466
1467 parent = NULL;
1468 for (node = &gfn_tree->rb_node; *node; ) {
1469 struct kvm_memory_slot *tmp;
1470
1471 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1472 parent = *node;
1473 if (slot->base_gfn < tmp->base_gfn)
1474 node = &(*node)->rb_left;
1475 else if (slot->base_gfn > tmp->base_gfn)
1476 node = &(*node)->rb_right;
1477 else
1478 BUG();
1479 }
1480
1481 rb_link_node(&slot->gfn_node[idx], parent, node);
1482 rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1483 }
1484
1485 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1486 struct kvm_memory_slot *slot)
1487 {
1488 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1489 }
1490
1491 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1492 struct kvm_memory_slot *old,
1493 struct kvm_memory_slot *new)
1494 {
1495 int idx = slots->node_idx;
1496
1497 WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1498
1499 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1500 &slots->gfn_tree);
1501 }
1502
1503 /*
1504 * Replace @old with @new in the inactive memslots.
1505 *
1506 * With NULL @old this simply adds @new.
1507 * With NULL @new this simply removes @old.
1508 *
1509 * If @new is non-NULL its hva_node[slots_idx] range has to be set
1510 * appropriately.
1511 */
1512 static void kvm_replace_memslot(struct kvm *kvm,
1513 struct kvm_memory_slot *old,
1514 struct kvm_memory_slot *new)
1515 {
1516 int as_id = kvm_memslots_get_as_id(old, new);
1517 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1518 int idx = slots->node_idx;
1519
1520 if (old) {
1521 hash_del(&old->id_node[idx]);
1522 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1523
1524 if ((long)old == atomic_long_read(&slots->last_used_slot))
1525 atomic_long_set(&slots->last_used_slot, (long)new);
1526
1527 if (!new) {
1528 kvm_erase_gfn_node(slots, old);
1529 return;
1530 }
1531 }
1532
1533 /*
1534 * Initialize @new's hva range. Do this even when replacing an @old
1535 * slot, as kvm_copy_memslot() deliberately does not touch node data.
1536 */
1537 new->hva_node[idx].start = new->userspace_addr;
1538 new->hva_node[idx].last = new->userspace_addr +
1539 (new->npages << PAGE_SHIFT) - 1;
1540
1541 /*
1542 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(),
1543 * hva_node needs to be swapped with remove+insert even though hva can't
1544 * change when replacing an existing slot.
1545 */
1546 hash_add(slots->id_hash, &new->id_node[idx], new->id);
1547 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1548
1549 /*
1550 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1551 * switch the node in the gfn tree instead of removing the old and
1552 * inserting the new as two separate operations. Replacement is a
1553 * single O(1) operation versus two O(log(n)) operations for
1554 * remove+insert.
1555 */
1556 if (old && old->base_gfn == new->base_gfn) {
1557 kvm_replace_gfn_node(slots, old, new);
1558 } else {
1559 if (old)
1560 kvm_erase_gfn_node(slots, old);
1561 kvm_insert_gfn_node(slots, new);
1562 }
1563 }
1564
1565 /*
1566 * Flags that do not access any of the extra space of struct
1567 * kvm_userspace_memory_region2. KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1568 * only allows these.
1569 */
1570 #define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1571 (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1572
1573 static int check_memory_region_flags(struct kvm *kvm,
1574 const struct kvm_userspace_memory_region2 *mem)
1575 {
1576 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1577
1578 if (IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
1579 valid_flags |= KVM_MEM_GUEST_MEMFD;
1580
1581 /* Dirty logging private memory is not currently supported. */
1582 if (mem->flags & KVM_MEM_GUEST_MEMFD)
1583 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1584
1585 /*
1586 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1587 * read-only memslots have emulated MMIO, not page fault, semantics,
1588 * and KVM doesn't allow emulated MMIO for private memory.
1589 */
1590 if (kvm_arch_has_readonly_mem(kvm) &&
1591 !(mem->flags & KVM_MEM_GUEST_MEMFD))
1592 valid_flags |= KVM_MEM_READONLY;
1593
1594 if (mem->flags & ~valid_flags)
1595 return -EINVAL;
1596
1597 return 0;
1598 }
1599
1600 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1601 {
1602 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1603
1604 /* Grab the generation from the currently active memslots. */
1605 u64 gen = __kvm_memslots(kvm, as_id)->generation;
1606
1607 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1608 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1609
1610 /*
1611 * Do not store the new memslots while there are invalidations in
1612 * progress, otherwise the locking in invalidate_range_start and
1613 * invalidate_range_end will be unbalanced.
1614 */
1615 spin_lock(&kvm->mn_invalidate_lock);
1616 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1617 while (kvm->mn_active_invalidate_count) {
1618 set_current_state(TASK_UNINTERRUPTIBLE);
1619 spin_unlock(&kvm->mn_invalidate_lock);
1620 schedule();
1621 spin_lock(&kvm->mn_invalidate_lock);
1622 }
1623 finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1624 rcu_assign_pointer(kvm->memslots[as_id], slots);
1625 spin_unlock(&kvm->mn_invalidate_lock);
1626
1627 /*
1628 * Acquired in kvm_set_memslot. Must be released before synchronize
1629 * SRCU below in order to avoid deadlock with another thread
1630 * acquiring the slots_arch_lock in an srcu critical section.
1631 */
1632 mutex_unlock(&kvm->slots_arch_lock);
1633
1634 synchronize_srcu_expedited(&kvm->srcu);
1635
1636 /*
1637 * Increment the new memslot generation a second time, dropping the
1638 * update in-progress flag and incrementing the generation based on
1639 * the number of address spaces. This provides a unique and easily
1640 * identifiable generation number while the memslots are in flux.
1641 */
1642 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1643
1644 /*
1645 * Generations must be unique even across address spaces. We do not need
1646 * a global counter for that, instead the generation space is evenly split
1647 * across address spaces. For example, with two address spaces, address
1648 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1649 * use generations 1, 3, 5, ...
1650 */
1651 gen += kvm_arch_nr_memslot_as_ids(kvm);
1652
1653 kvm_arch_memslots_updated(kvm, gen);
1654
1655 slots->generation = gen;
1656 }
1657
1658 static int kvm_prepare_memory_region(struct kvm *kvm,
1659 const struct kvm_memory_slot *old,
1660 struct kvm_memory_slot *new,
1661 enum kvm_mr_change change)
1662 {
1663 int r;
1664
1665 /*
1666 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1667 * will be freed on "commit". If logging is enabled in both old and
1668 * new, reuse the existing bitmap. If logging is enabled only in the
1669 * new and KVM isn't using a ring buffer, allocate and initialize a
1670 * new bitmap.
1671 */
1672 if (change != KVM_MR_DELETE) {
1673 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1674 new->dirty_bitmap = NULL;
1675 else if (old && old->dirty_bitmap)
1676 new->dirty_bitmap = old->dirty_bitmap;
1677 else if (kvm_use_dirty_bitmap(kvm)) {
1678 r = kvm_alloc_dirty_bitmap(new);
1679 if (r)
1680 return r;
1681
1682 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1683 bitmap_set(new->dirty_bitmap, 0, new->npages);
1684 }
1685 }
1686
1687 r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1688
1689 /* Free the bitmap on failure if it was allocated above. */
1690 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1691 kvm_destroy_dirty_bitmap(new);
1692
1693 return r;
1694 }
1695
1696 static void kvm_commit_memory_region(struct kvm *kvm,
1697 struct kvm_memory_slot *old,
1698 const struct kvm_memory_slot *new,
1699 enum kvm_mr_change change)
1700 {
1701 int old_flags = old ? old->flags : 0;
1702 int new_flags = new ? new->flags : 0;
1703 /*
1704 * Update the total number of memslot pages before calling the arch
1705 * hook so that architectures can consume the result directly.
1706 */
1707 if (change == KVM_MR_DELETE)
1708 kvm->nr_memslot_pages -= old->npages;
1709 else if (change == KVM_MR_CREATE)
1710 kvm->nr_memslot_pages += new->npages;
1711
1712 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1713 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1714 atomic_set(&kvm->nr_memslots_dirty_logging,
1715 atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1716 }
1717
1718 kvm_arch_commit_memory_region(kvm, old, new, change);
1719
1720 switch (change) {
1721 case KVM_MR_CREATE:
1722 /* Nothing more to do. */
1723 break;
1724 case KVM_MR_DELETE:
1725 /* Free the old memslot and all its metadata. */
1726 kvm_free_memslot(kvm, old);
1727 break;
1728 case KVM_MR_MOVE:
1729 /*
1730 * Moving a guest_memfd memslot isn't supported, and will never
1731 * be supported.
1732 */
1733 WARN_ON_ONCE(old->flags & KVM_MEM_GUEST_MEMFD);
1734 fallthrough;
1735 case KVM_MR_FLAGS_ONLY:
1736 /*
1737 * Free the dirty bitmap as needed; the check below encompasses
1738 * both the flags and whether a ring buffer is being used.
1739 */
1740 if (old->dirty_bitmap && !new->dirty_bitmap)
1741 kvm_destroy_dirty_bitmap(old);
1742
1743 /*
1744 * Unbind the guest_memfd instance as needed; the @new slot has
1745 * already created its own binding. TODO: Drop the WARN when
1746 * dirty logging of guest_memfd memslots is supported. Until then,
1747 * flags-only changes on guest_memfd slots should be impossible.
1748 */
1749 if (WARN_ON_ONCE(old->flags & KVM_MEM_GUEST_MEMFD))
1750 kvm_gmem_unbind(old);
1751
1752 /*
1753 * The final quirk. Free the detached old slot, but only its
1754 * memory, not any metadata. Metadata, including arch specific
1755 * data, may be reused by @new.
1756 */
1757 kfree(old);
1758 break;
1759 default:
1760 BUG();
1761 }
1762 }
1763
1764 /*
1765 * Activate @new, which must be installed in the inactive slots by the caller,
1766 * by swapping the active slots and then propagating @new to @old once @old is
1767 * unreachable and can be safely modified.
1768 *
1769 * With NULL @old this simply adds @new to @active (while swapping the sets).
1770 * With NULL @new this simply removes @old from @active and frees it
1771 * (while also swapping the sets).
1772 */
1773 static void kvm_activate_memslot(struct kvm *kvm,
1774 struct kvm_memory_slot *old,
1775 struct kvm_memory_slot *new)
1776 {
1777 int as_id = kvm_memslots_get_as_id(old, new);
1778
1779 kvm_swap_active_memslots(kvm, as_id);
1780
1781 /* Propagate the new memslot to the now inactive memslots. */
1782 kvm_replace_memslot(kvm, old, new);
1783 }
1784
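/*
 * Shallow-copy @src into @dest.  Pointer fields, e.g. the dirty bitmap,
 * reference the same underlying data; nothing is duplicated.
 */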
1785 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1786 const struct kvm_memory_slot *src)
1787 {
1788 dest->base_gfn = src->base_gfn;
1789 dest->npages = src->npages;
1790 dest->dirty_bitmap = src->dirty_bitmap;
1791 dest->arch = src->arch;
1792 dest->userspace_addr = src->userspace_addr;
1793 dest->flags = src->flags;
1794 dest->id = src->id;
1795 dest->as_id = src->as_id;
1796 }
1797
1798 static void kvm_invalidate_memslot(struct kvm *kvm,
1799 struct kvm_memory_slot *old,
1800 struct kvm_memory_slot *invalid_slot)
1801 {
1802 /*
1803 * Mark the current slot INVALID. As with all memslot modifications,
1804 * this must be done on an unreachable slot to avoid modifying the
1805 * current slot in the active tree.
1806 */
1807 kvm_copy_memslot(invalid_slot, old);
1808 invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1809 kvm_replace_memslot(kvm, old, invalid_slot);
1810
1811 /*
1812 * Activate the slot that is now marked INVALID, but don't propagate
1813 * the slot to the now inactive slots. The slot is either going to be
1814 * deleted or recreated as a new slot.
1815 */
1816 kvm_swap_active_memslots(kvm, old->as_id);
1817
1818 /*
1819 * From this point no new shadow pages pointing to a deleted, or moved,
1820 * memslot will be created. Validation of sp->gfn happens in:
1821 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1822 * - kvm_is_visible_gfn (mmu_check_root)
1823 */
1824 kvm_arch_flush_shadow_memslot(kvm, old);
1825 kvm_arch_guest_memory_reclaimed(kvm);
1826
1827 /* Was released by kvm_swap_active_memslots(), reacquire. */
1828 mutex_lock(&kvm->slots_arch_lock);
1829
1830 /*
1831 * Copy the arch-specific field of the newly-installed slot back to the
1832 * old slot as the arch data could have changed between releasing
1833 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1834 * above. Writers are required to retrieve memslots *after* acquiring
1835 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1836 */
1837 old->arch = invalid_slot->arch;
1838 }
1839
1840 static void kvm_create_memslot(struct kvm *kvm,
1841 struct kvm_memory_slot *new)
1842 {
1843 /* Add the new memslot to the inactive set and activate. */
1844 kvm_replace_memslot(kvm, NULL, new);
1845 kvm_activate_memslot(kvm, NULL, new);
1846 }
1847
1848 static void kvm_delete_memslot(struct kvm *kvm,
1849 struct kvm_memory_slot *old,
1850 struct kvm_memory_slot *invalid_slot)
1851 {
1852 /*
1853 * Remove the old memslot (in the inactive memslots) by passing NULL as
1854 * the "new" slot, and do the same for the invalid version in the active slots.
1855 */
1856 kvm_replace_memslot(kvm, old, NULL);
1857 kvm_activate_memslot(kvm, invalid_slot, NULL);
1858 }
1859
1860 static void kvm_move_memslot(struct kvm *kvm,
1861 struct kvm_memory_slot *old,
1862 struct kvm_memory_slot *new,
1863 struct kvm_memory_slot *invalid_slot)
1864 {
1865 /*
1866 * Replace the old memslot in the inactive slots, and then swap slots
1867 * and replace the current INVALID with the new as well.
1868 */
1869 kvm_replace_memslot(kvm, old, new);
1870 kvm_activate_memslot(kvm, invalid_slot, new);
1871 }
1872
1873 static void kvm_update_flags_memslot(struct kvm *kvm,
1874 struct kvm_memory_slot *old,
1875 struct kvm_memory_slot *new)
1876 {
1877 /*
1878 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1879 * an intermediate step. Instead, the old memslot is simply replaced
1880 * with a new, updated copy in both memslot sets.
1881 */
1882 kvm_replace_memslot(kvm, old, new);
1883 kvm_activate_memslot(kvm, old, new);
1884 }
1885
1886 static int kvm_set_memslot(struct kvm *kvm,
1887 struct kvm_memory_slot *old,
1888 struct kvm_memory_slot *new,
1889 enum kvm_mr_change change)
1890 {
1891 struct kvm_memory_slot *invalid_slot;
1892 int r;
1893
1894 /*
1895 * Released in kvm_swap_active_memslots().
1896 *
1897 * Must be held from before the current memslots are copied until after
1898 * the new memslots are installed with rcu_assign_pointer, then
1899 * released before the synchronize srcu in kvm_swap_active_memslots().
1900 *
1901 * When modifying memslots outside of the slots_lock, must be held
1902 * before reading the pointer to the current memslots until after all
1903 * changes to those memslots are complete.
1904 *
1905 * These rules ensure that installing new memslots does not lose
1906 * changes made to the previous memslots.
1907 */
1908 mutex_lock(&kvm->slots_arch_lock);
1909
1910 /*
1911 * Invalidate the old slot if it's being deleted or moved. This is
1912 * done prior to actually deleting/moving the memslot to allow vCPUs to
1913 * continue running by ensuring there are no mappings or shadow pages
1914 * for the memslot when it is deleted/moved. Without pre-invalidation
1915 * (and without a lock), a window would exist between effecting the
1916 * delete/move and committing the changes in arch code where KVM or a
1917 * guest could access a non-existent memslot.
1918 *
1919 * Modifications are done on a temporary, unreachable slot. The old
1920 * slot needs to be preserved in case a later step fails and the
1921 * invalidation needs to be reverted.
1922 */
1923 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1924 invalid_slot = kzalloc_obj(*invalid_slot, GFP_KERNEL_ACCOUNT);
1925 if (!invalid_slot) {
1926 mutex_unlock(&kvm->slots_arch_lock);
1927 return -ENOMEM;
1928 }
1929 kvm_invalidate_memslot(kvm, old, invalid_slot);
1930 }
1931
1932 r = kvm_prepare_memory_region(kvm, old, new, change);
1933 if (r) {
1934 /*
1935 * For DELETE/MOVE, revert the above INVALID change. No
1936 * modifications required since the original slot was preserved
1937 * in the inactive slots. Changing the active memslots also
1938 * releases slots_arch_lock.
1939 */
1940 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1941 kvm_activate_memslot(kvm, invalid_slot, old);
1942 kfree(invalid_slot);
1943 } else {
1944 mutex_unlock(&kvm->slots_arch_lock);
1945 }
1946 return r;
1947 }
1948
1949 /*
1950 * For DELETE and MOVE, invalid_slot is now active as the INVALID
1951 * version of the old slot. MOVE is particularly special as it reuses
1952 * the old slot and returns a copy of the old slot (in invalid_slot).
1953 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
1954 * old slot is detached but otherwise preserved.
1955 */
1956 if (change == KVM_MR_CREATE)
1957 kvm_create_memslot(kvm, new);
1958 else if (change == KVM_MR_DELETE)
1959 kvm_delete_memslot(kvm, old, invalid_slot);
1960 else if (change == KVM_MR_MOVE)
1961 kvm_move_memslot(kvm, old, new, invalid_slot);
1962 else if (change == KVM_MR_FLAGS_ONLY)
1963 kvm_update_flags_memslot(kvm, old, new);
1964 else
1965 BUG();
1966
1967 /* Free the temporary INVALID slot used for DELETE and MOVE. */
1968 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1969 kfree(invalid_slot);
1970
1971 /*
1972 * No need to refresh new->arch, changes after dropping slots_arch_lock
1973 * will directly hit the final, active memslot. Architectures are
1974 * responsible for knowing that new->arch may be stale.
1975 */
1976 kvm_commit_memory_region(kvm, old, new, change);
1977
1978 return 0;
1979 }
1980
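/*
 * Return true if any memslot other than @id overlaps the gfn range
 * [@start, @end), i.e. if placing slot @id in that range would collide
 * with an existing slot.
 */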
1981 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1982 gfn_t start, gfn_t end)
1983 {
1984 struct kvm_memslot_iter iter;
1985
1986 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1987 if (iter.slot->id != id)
1988 return true;
1989 }
1990
1991 return false;
1992 }
1993
1994 static int kvm_set_memory_region(struct kvm *kvm,
1995 const struct kvm_userspace_memory_region2 *mem)
1996 {
1997 struct kvm_memory_slot *old, *new;
1998 struct kvm_memslots *slots;
1999 enum kvm_mr_change change;
2000 unsigned long npages;
2001 gfn_t base_gfn;
2002 int as_id, id;
2003 int r;
2004
2005 lockdep_assert_held(&kvm->slots_lock);
2006
2007 r = check_memory_region_flags(kvm, mem);
2008 if (r)
2009 return r;
2010
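	/*
	 * mem->slot encodes the address space ID in bits 31:16 and the
	 * slot ID in bits 15:0.
	 */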
2011 as_id = mem->slot >> 16;
2012 id = (u16)mem->slot;
2013
2014 /* General sanity checks */
2015 if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2016 (mem->memory_size != (unsigned long)mem->memory_size))
2017 return -EINVAL;
2018 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2019 return -EINVAL;
2020 /* We can read the guest memory with __xxx_user() later on. */
2021 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2022 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2023 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2024 mem->memory_size))
2025 return -EINVAL;
2026 if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2027 (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2028 mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2029 return -EINVAL;
2030 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2031 return -EINVAL;
2032 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2033 return -EINVAL;
2034
2035 /*
2036 * The size of userspace-defined memory regions is restricted in order
2037 * to play nice with dirty bitmap operations, which are indexed with an
2038 * "unsigned int". KVM's internal memory regions don't support dirty
2039 * logging, and so are exempt.
2040 */
2041 if (id < KVM_USER_MEM_SLOTS &&
2042 (mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2043 return -EINVAL;
2044
2045 slots = __kvm_memslots(kvm, as_id);
2046
2047 /*
2048 * Note, the old memslot (and the pointer itself!) may be invalidated
2049 * and/or destroyed by kvm_set_memslot().
2050 */
2051 old = id_to_memslot(slots, id);
2052
2053 if (!mem->memory_size) {
2054 if (!old || !old->npages)
2055 return -EINVAL;
2056
2057 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2058 return -EIO;
2059
2060 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2061 }
2062
2063 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2064 npages = (mem->memory_size >> PAGE_SHIFT);
2065
2066 if (!old || !old->npages) {
2067 change = KVM_MR_CREATE;
2068
2069 /*
2070 * To simplify KVM internals, the total number of pages across
2071 * all memslots must fit in an unsigned long.
2072 */
2073 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2074 return -EINVAL;
2075 } else { /* Modify an existing slot. */
2076 /* Private memslots are immutable; they can only be deleted. */
2077 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2078 return -EINVAL;
2079 if ((mem->userspace_addr != old->userspace_addr) ||
2080 (npages != old->npages) ||
2081 ((mem->flags ^ old->flags) & (KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD)))
2082 return -EINVAL;
2083
2084 if (base_gfn != old->base_gfn)
2085 change = KVM_MR_MOVE;
2086 else if (mem->flags != old->flags)
2087 change = KVM_MR_FLAGS_ONLY;
2088 else /* Nothing to change. */
2089 return 0;
2090 }
2091
2092 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2093 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2094 return -EEXIST;
2095
2096 /* Allocate a new slot that will persist in the memslots. */
2097 new = kzalloc_obj(*new, GFP_KERNEL_ACCOUNT);
2098 if (!new)
2099 return -ENOMEM;
2100
2101 new->as_id = as_id;
2102 new->id = id;
2103 new->base_gfn = base_gfn;
2104 new->npages = npages;
2105 new->flags = mem->flags;
2106 new->userspace_addr = mem->userspace_addr;
2107 if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2108 r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2109 if (r)
2110 goto out;
2111 }
2112
2113 r = kvm_set_memslot(kvm, old, new, change);
2114 if (r)
2115 goto out_unbind;
2116
2117 return 0;
2118
2119 out_unbind:
2120 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2121 kvm_gmem_unbind(new);
2122 out:
2123 kfree(new);
2124 return r;
2125 }
2126
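/*
 * Create or update a KVM-internal memslot (id >= KVM_USER_MEM_SLOTS), e.g.
 * for arch-private regions such as x86's TSS and identity-map pages.
 * Internal memslots must not use any flags, and the caller must hold
 * kvm->slots_lock.
 */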
2127 int kvm_set_internal_memslot(struct kvm *kvm,
2128 const struct kvm_userspace_memory_region2 *mem)
2129 {
2130 if (WARN_ON_ONCE(mem->slot < KVM_USER_MEM_SLOTS))
2131 return -EINVAL;
2132
2133 if (WARN_ON_ONCE(mem->flags))
2134 return -EINVAL;
2135
2136 return kvm_set_memory_region(kvm, mem);
2137 }
2138 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_internal_memslot);
2139
2140 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2141 struct kvm_userspace_memory_region2 *mem)
2142 {
2143 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2144 return -EINVAL;
2145
2146 guard(mutex)(&kvm->slots_lock);
2147 return kvm_set_memory_region(kvm, mem);
2148 }
2149
2150 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2151 /**
2152 * kvm_get_dirty_log - get a snapshot of dirty pages
2153 * @kvm: pointer to kvm instance
2154 * @log: slot id and address to which we copy the log
2155 * @is_dirty: set to '1' if any dirty pages were found
2156 * @memslot: set to the associated memslot, always valid on success
2157 */
2158 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2159 int *is_dirty, struct kvm_memory_slot **memslot)
2160 {
2161 struct kvm_memslots *slots;
2162 int i, as_id, id;
2163 unsigned long n;
2164 unsigned long any = 0;
2165
2166 /* Dirty ring tracking may be exclusive to dirty log tracking */
2167 if (!kvm_use_dirty_bitmap(kvm))
2168 return -ENXIO;
2169
2170 *memslot = NULL;
2171 *is_dirty = 0;
2172
2173 as_id = log->slot >> 16;
2174 id = (u16)log->slot;
2175 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2176 return -EINVAL;
2177
2178 slots = __kvm_memslots(kvm, as_id);
2179 *memslot = id_to_memslot(slots, id);
2180 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2181 return -ENOENT;
2182
2183 kvm_arch_sync_dirty_log(kvm, *memslot);
2184
2185 n = kvm_dirty_bitmap_bytes(*memslot);
2186
2187 for (i = 0; !any && i < n/sizeof(long); ++i)
2188 any = (*memslot)->dirty_bitmap[i];
2189
2190 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2191 return -EFAULT;
2192
2193 if (any)
2194 *is_dirty = 1;
2195 return 0;
2196 }
2197 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dirty_log);
2198
2199 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2200 /**
2201 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2202 * and reenable dirty page tracking for the corresponding pages.
2203 * @kvm: pointer to kvm instance
2204 * @log: slot id and address to which we copy the log
2205 *
2206 * We need to keep in mind that VCPU threads can write to the bitmap
2207 * concurrently. So, to avoid losing track of dirty pages we keep the
2208 * following order:
2209 *
2210 * 1. Take a snapshot of the bit and clear it if needed.
2211 * 2. Write protect the corresponding page.
2212 * 3. Copy the snapshot to the userspace.
2213 * 4. Upon return, the caller flushes TLBs if needed.
2214 *
2215 * Between 2 and 4, the guest may write to the page using the remaining TLB
2216 * entry. This is not a problem because the page is reported dirty using
2217 * the snapshot taken before and step 4 ensures that writes done after
2218 * exiting to userspace will be logged for the next call.
2219 *
2220 */
2221 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2222 {
2223 struct kvm_memslots *slots;
2224 struct kvm_memory_slot *memslot;
2225 int i, as_id, id;
2226 unsigned long n;
2227 unsigned long *dirty_bitmap;
2228 unsigned long *dirty_bitmap_buffer;
2229 bool flush;
2230
2231 /* Dirty ring tracking may be exclusive to dirty log tracking */
2232 if (!kvm_use_dirty_bitmap(kvm))
2233 return -ENXIO;
2234
2235 as_id = log->slot >> 16;
2236 id = (u16)log->slot;
2237 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2238 return -EINVAL;
2239
2240 slots = __kvm_memslots(kvm, as_id);
2241 memslot = id_to_memslot(slots, id);
2242 if (!memslot || !memslot->dirty_bitmap)
2243 return -ENOENT;
2244
2245 dirty_bitmap = memslot->dirty_bitmap;
2246
2247 kvm_arch_sync_dirty_log(kvm, memslot);
2248
2249 n = kvm_dirty_bitmap_bytes(memslot);
2250 flush = false;
2251 if (kvm->manual_dirty_log_protect) {
2252 /*
2253 * Unlike kvm_get_dirty_log, we always return false in *flush,
2254 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
2255 * is some code duplication between this function and
2256 * kvm_get_dirty_log, but hopefully all architecture
2257 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2258 * can be eliminated.
2259 */
2260 dirty_bitmap_buffer = dirty_bitmap;
2261 } else {
2262 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2263 memset(dirty_bitmap_buffer, 0, n);
2264
2265 KVM_MMU_LOCK(kvm);
2266 for (i = 0; i < n / sizeof(long); i++) {
2267 unsigned long mask;
2268 gfn_t offset;
2269
2270 if (!dirty_bitmap[i])
2271 continue;
2272
2273 flush = true;
2274 mask = xchg(&dirty_bitmap[i], 0);
2275 dirty_bitmap_buffer[i] = mask;
2276
2277 offset = i * BITS_PER_LONG;
2278 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2279 offset, mask);
2280 }
2281 KVM_MMU_UNLOCK(kvm);
2282 }
2283
2284 if (flush)
2285 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2286
2287 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2288 return -EFAULT;
2289 return 0;
2290 }
2291
2292
2293 /**
2294 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2295 * @kvm: kvm instance
2296 * @log: slot id and address to which we copy the log
2297 *
2298 * Steps 1-4 below provide a general overview of dirty page logging. See
2299 * kvm_get_dirty_log_protect() function description for additional details.
2300 *
2301 * We call kvm_get_dirty_log_protect() to handle steps 1-3, and upon return the
2302 * TLB is always flushed (step 4) even if a previous step failed and the dirty
2303 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging API
2304 * does not preclude a subsequent dirty log read by userspace. Flushing the TLB
2305 * ensures writes will be marked dirty for the next log read.
2306 *
2307 * 1. Take a snapshot of the bit and clear it if needed.
2308 * 2. Write protect the corresponding page.
2309 * 3. Copy the snapshot to the userspace.
2310 * 4. Flush TLB's if needed.
2311 */
2312 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2313 struct kvm_dirty_log *log)
2314 {
2315 int r;
2316
2317 mutex_lock(&kvm->slots_lock);
2318
2319 r = kvm_get_dirty_log_protect(kvm, log);
2320
2321 mutex_unlock(&kvm->slots_lock);
2322 return r;
2323 }
2324
2325 /**
2326 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2327 * and reenable dirty page tracking for the corresponding pages.
2328 * @kvm: pointer to kvm instance
2329 * @log: slot id and address from which to fetch the bitmap of dirty pages
2330 */
2331 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2332 struct kvm_clear_dirty_log *log)
2333 {
2334 struct kvm_memslots *slots;
2335 struct kvm_memory_slot *memslot;
2336 int as_id, id;
2337 gfn_t offset;
2338 unsigned long i, n;
2339 unsigned long *dirty_bitmap;
2340 unsigned long *dirty_bitmap_buffer;
2341 bool flush;
2342
2343 /* Dirty ring tracking may be exclusive to dirty log tracking */
2344 if (!kvm_use_dirty_bitmap(kvm))
2345 return -ENXIO;
2346
2347 as_id = log->slot >> 16;
2348 id = (u16)log->slot;
2349 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2350 return -EINVAL;
2351
2352 if (log->first_page & 63)
2353 return -EINVAL;
2354
2355 slots = __kvm_memslots(kvm, as_id);
2356 memslot = id_to_memslot(slots, id);
2357 if (!memslot || !memslot->dirty_bitmap)
2358 return -ENOENT;
2359
2360 dirty_bitmap = memslot->dirty_bitmap;
2361
2362 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2363
2364 if (log->first_page > memslot->npages ||
2365 log->num_pages > memslot->npages - log->first_page ||
2366 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2367 return -EINVAL;
2368
2369 kvm_arch_sync_dirty_log(kvm, memslot);
2370
2371 flush = false;
2372 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2373 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2374 return -EFAULT;
2375
2376 KVM_MMU_LOCK(kvm);
2377 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2378 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2379 i++, offset += BITS_PER_LONG) {
2380 unsigned long mask = *dirty_bitmap_buffer++;
2381 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2382 if (!mask)
2383 continue;
2384
2385 mask &= atomic_long_fetch_andnot(mask, p);
2386
2387 /*
2388 * mask contains the bits that really have been cleared. This
2389 * never includes any bits beyond the length of the memslot (if
2390 * the length is not aligned to 64 pages), therefore it is not
2391 * a problem if userspace sets them in log->dirty_bitmap.
2392 */
2393 if (mask) {
2394 flush = true;
2395 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2396 offset, mask);
2397 }
2398 }
2399 KVM_MMU_UNLOCK(kvm);
2400
2401 if (flush)
2402 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2403
2404 return 0;
2405 }
2406
2407 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2408 struct kvm_clear_dirty_log *log)
2409 {
2410 int r;
2411
2412 mutex_lock(&kvm->slots_lock);
2413
2414 r = kvm_clear_dirty_log_protect(kvm, log);
2415
2416 mutex_unlock(&kvm->slots_lock);
2417 return r;
2418 }
2419 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2420
2421 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
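/*
 * Return the set of attribute bits that userspace may set for @kvm.  A NULL
 * @kvm is used when reporting system-wide capabilities, i.e. advertises
 * everything KVM supports generically.
 */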
2422 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2423 {
2424 if (!kvm || kvm_arch_has_private_mem(kvm))
2425 return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2426
2427 return 0;
2428 }
2429
2430 /*
2431 * Returns true if _all_ gfns in the range [@start, @end) have attributes
2432 * such that the bits in @mask match @attrs.
2433 */
2434 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2435 unsigned long mask, unsigned long attrs)
2436 {
2437 XA_STATE(xas, &kvm->mem_attr_array, start);
2438 unsigned long index;
2439 void *entry;
2440
2441 mask &= kvm_supported_mem_attributes(kvm);
2442 if (attrs & ~mask)
2443 return false;
2444
2445 if (end == start + 1)
2446 return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
2447
2448 guard(rcu)();
2449 if (!attrs)
2450 return !xas_find(&xas, end - 1);
2451
2452 for (index = start; index < end; index++) {
2453 do {
2454 entry = xas_next(&xas);
2455 } while (xas_retry(&xas, entry));
2456
2457 if (xas.xa_index != index ||
2458 (xa_to_value(entry) & mask) != attrs)
2459 return false;
2460 }
2461
2462 return true;
2463 }
2464
2465 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2466 struct kvm_mmu_notifier_range *range)
2467 {
2468 struct kvm_gfn_range gfn_range;
2469 struct kvm_memory_slot *slot;
2470 struct kvm_memslots *slots;
2471 struct kvm_memslot_iter iter;
2472 bool found_memslot = false;
2473 bool ret = false;
2474 int i;
2475
2476 gfn_range.arg = range->arg;
2477 gfn_range.may_block = range->may_block;
2478
2479 /*
2480 * If/when KVM supports more attributes beyond private vs. shared, this
2481 * _could_ set KVM_FILTER_{SHARED,PRIVATE} appropriately if the entire target
2482 * range already has the desired private vs. shared state (it's unclear
2483 * if that is a net win). For now, KVM reaches this point if and only
2484 * if the private flag is being toggled, i.e. all mappings are in play.
2485 */
2486
2487 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2488 slots = __kvm_memslots(kvm, i);
2489
2490 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2491 slot = iter.slot;
2492 gfn_range.slot = slot;
2493
2494 gfn_range.start = max(range->start, slot->base_gfn);
2495 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2496 if (gfn_range.start >= gfn_range.end)
2497 continue;
2498
2499 if (!found_memslot) {
2500 found_memslot = true;
2501 KVM_MMU_LOCK(kvm);
2502 if (!IS_KVM_NULL_FN(range->on_lock))
2503 range->on_lock(kvm);
2504 }
2505
2506 ret |= range->handler(kvm, &gfn_range);
2507 }
2508 }
2509
2510 if (range->flush_on_ret && ret)
2511 kvm_flush_remote_tlbs(kvm);
2512
2513 if (found_memslot)
2514 KVM_MMU_UNLOCK(kvm);
2515 }
2516
2517 static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2518 struct kvm_gfn_range *range)
2519 {
2520 /*
2521 * Unconditionally add the range to the invalidation set, regardless of
2522 * whether or not the arch callback actually needs to zap SPTEs. E.g.
2523 * if KVM supports RWX attributes in the future and the attributes are
2524 * going from R=>RW, zapping isn't strictly necessary. Unconditionally
2525 * adding the range allows KVM to require that MMU invalidations add at
2526 * least one range between begin() and end(), e.g. allows KVM to detect
2527 * bugs where the add() is missed. Relaxing the rule *might* be safe,
2528 * but it's not obvious that allowing new mappings while the attributes
2529 * are in flux is desirable or worth the complexity.
2530 */
2531 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2532
2533 return kvm_arch_pre_set_memory_attributes(kvm, range);
2534 }
2535
2536 /* Set @attributes for the gfn range [@start, @end). */
2537 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2538 unsigned long attributes)
2539 {
2540 struct kvm_mmu_notifier_range pre_set_range = {
2541 .start = start,
2542 .end = end,
2543 .arg.attributes = attributes,
2544 .handler = kvm_pre_set_memory_attributes,
2545 .on_lock = kvm_mmu_invalidate_begin,
2546 .flush_on_ret = true,
2547 .may_block = true,
2548 };
2549 struct kvm_mmu_notifier_range post_set_range = {
2550 .start = start,
2551 .end = end,
2552 .arg.attributes = attributes,
2553 .handler = kvm_arch_post_set_memory_attributes,
2554 .on_lock = kvm_mmu_invalidate_end,
2555 .may_block = true,
2556 };
2557 unsigned long i;
2558 void *entry;
2559 int r = 0;
2560
2561 entry = attributes ? xa_mk_value(attributes) : NULL;
2562
2563 trace_kvm_vm_set_mem_attributes(start, end, attributes);
2564
2565 mutex_lock(&kvm->slots_lock);
2566
2567 /* Nothing to do if the entire range has the desired attributes. */
2568 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
2569 goto out_unlock;
2570
2571 /*
2572 * Reserve memory ahead of time to avoid having to deal with failures
2573 * partway through setting the new attributes.
2574 */
2575 for (i = start; i < end; i++) {
2576 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2577 if (r)
2578 goto out_unlock;
2579
2580 cond_resched();
2581 }
2582
2583 kvm_handle_gfn_range(kvm, &pre_set_range);
2584
2585 for (i = start; i < end; i++) {
2586 r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2587 GFP_KERNEL_ACCOUNT));
2588 KVM_BUG_ON(r, kvm);
2589 cond_resched();
2590 }
2591
2592 kvm_handle_gfn_range(kvm, &post_set_range);
2593
2594 out_unlock:
2595 mutex_unlock(&kvm->slots_lock);
2596
2597 return r;
2598 }
2599 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2600 struct kvm_memory_attributes *attrs)
2601 {
2602 gfn_t start, end;
2603
2604 /* flags is currently not used. */
2605 if (attrs->flags)
2606 return -EINVAL;
2607 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2608 return -EINVAL;
2609 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2610 return -EINVAL;
2611 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2612 return -EINVAL;
2613
2614 start = attrs->address >> PAGE_SHIFT;
2615 end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2616
2617 /*
2618 * xarray tracks data using "unsigned long", and as a result so does
2619 * KVM. For simplicity, KVM supports generic attributes only on 64-bit
2620 * architectures.
2621 */
2622 BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2623
2624 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2625 }
2626 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2627
2628 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2629 {
2630 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2631 }
2632 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_memslot);
2633
2634 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2635 {
2636 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2637 u64 gen = slots->generation;
2638 struct kvm_memory_slot *slot;
2639
2640 /*
2641 * This also protects against using a memslot from a different address space,
2642 * since different address spaces have different generation numbers.
2643 */
2644 if (unlikely(gen != vcpu->last_used_slot_gen)) {
2645 vcpu->last_used_slot = NULL;
2646 vcpu->last_used_slot_gen = gen;
2647 }
2648
2649 slot = try_get_memslot(vcpu->last_used_slot, gfn);
2650 if (slot)
2651 return slot;
2652
2653 /*
2654 * Fall back to searching all memslots. We purposely use
2655 * search_memslots() instead of __gfn_to_memslot() to avoid
2656 * thrashing the VM-wide last_used_slot in kvm_memslots.
2657 */
2658 slot = search_memslots(slots, gfn, false);
2659 if (slot) {
2660 vcpu->last_used_slot = slot;
2661 return slot;
2662 }
2663
2664 return NULL;
2665 }
2666 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_memslot);
2667
2668 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2669 {
2670 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2671
2672 return kvm_is_visible_memslot(memslot);
2673 }
2674 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_visible_gfn);
2675
2676 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2677 {
2678 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2679
2680 return kvm_is_visible_memslot(memslot);
2681 }
2682 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_visible_gfn);
2683
2684 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2685 {
2686 struct vm_area_struct *vma;
2687 unsigned long addr, size;
2688
2689 size = PAGE_SIZE;
2690
2691 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2692 if (kvm_is_error_hva(addr))
2693 return PAGE_SIZE;
2694
2695 mmap_read_lock(current->mm);
2696 vma = find_vma(current->mm, addr);
2697 if (!vma)
2698 goto out;
2699
2700 size = vma_kernel_pagesize(vma);
2701
2702 out:
2703 mmap_read_unlock(current->mm);
2704
2705 return size;
2706 }
2707
2708 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2709 {
2710 return slot->flags & KVM_MEM_READONLY;
2711 }
2712
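/*
 * Translate @gfn within @slot to a host virtual address.  On success,
 * optionally return in @nr_pages the number of pages remaining in the slot
 * starting at @gfn.  Fails for missing/INVALID slots and, if @write is true,
 * for read-only slots.
 */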
2713 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2714 gfn_t *nr_pages, bool write)
2715 {
2716 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2717 return KVM_HVA_ERR_BAD;
2718
2719 if (memslot_is_readonly(slot) && write)
2720 return KVM_HVA_ERR_RO_BAD;
2721
2722 if (nr_pages)
2723 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2724
2725 return __gfn_to_hva_memslot(slot, gfn);
2726 }
2727
2728 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2729 gfn_t *nr_pages)
2730 {
2731 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2732 }
2733
2734 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2735 gfn_t gfn)
2736 {
2737 return gfn_to_hva_many(slot, gfn, NULL);
2738 }
2739 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva_memslot);
2740
2741 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2742 {
2743 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2744 }
2745 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva);
2746
2747 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2748 {
2749 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2750 }
2751 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_hva);
2752
2753 /*
2754 * Return the hva of a @gfn and the R/W attribute if possible.
2755 *
2756 * @slot: the kvm_memory_slot which contains @gfn
2757 * @gfn: the gfn to be translated
2758 * @writable: used to return the read/write attribute of the @slot if the hva
2759 * is valid and @writable is not NULL
2760 */
2761 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2762 gfn_t gfn, bool *writable)
2763 {
2764 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2765
2766 if (!kvm_is_error_hva(hva) && writable)
2767 *writable = !memslot_is_readonly(slot);
2768
2769 return hva;
2770 }
2771
2772 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2773 {
2774 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2775
2776 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2777 }
2778
2779 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2780 {
2781 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2782
2783 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2784 }
2785
2786 static bool kvm_is_ad_tracked_page(struct page *page)
2787 {
2788 /*
2789 * Per page-flags.h, pages tagged PG_reserved "should in general not be
2790 * touched (e.g. set dirty) except by its owner".
2791 */
2792 return !PageReserved(page);
2793 }
2794
2795 static void kvm_set_page_dirty(struct page *page)
2796 {
2797 if (kvm_is_ad_tracked_page(page))
2798 SetPageDirty(page);
2799 }
2800
2801 static void kvm_set_page_accessed(struct page *page)
2802 {
2803 if (kvm_is_ad_tracked_page(page))
2804 mark_page_accessed(page);
2805 }
2806
2807 void kvm_release_page_clean(struct page *page)
2808 {
2809 if (!page)
2810 return;
2811
2812 kvm_set_page_accessed(page);
2813 put_page(page);
2814 }
2815 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_clean);
2816
2817 void kvm_release_page_dirty(struct page *page)
2818 {
2819 if (!page)
2820 return;
2821
2822 kvm_set_page_dirty(page);
2823 kvm_release_page_clean(page);
2824 }
2825 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_dirty);
2826
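/*
 * Convert the result of a gup/follow_pfnmap lookup into a pfn, record
 * whether the mapping is writable, and stash the struct page (if any) in
 * @kfp->refcounted_page so the caller can release the reference it holds.
 */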
2827 static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
2828 struct follow_pfnmap_args *map, bool writable)
2829 {
2830 kvm_pfn_t pfn;
2831
2832 WARN_ON_ONCE(!!page == !!map);
2833
2834 if (kfp->map_writable)
2835 *kfp->map_writable = writable;
2836
2837 if (map)
2838 pfn = map->pfn;
2839 else
2840 pfn = page_to_pfn(page);
2841
2842 *kfp->refcounted_page = page;
2843
2844 return pfn;
2845 }
2846
2847 /*
2848 * The fast path to get a writable pfn, which is stored in @pfn. Returns
2849 * true on success, false otherwise.
2850 */
2851 static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2852 {
2853 struct page *page;
2854 bool r;
2855
2856 /*
2857 * Try the fast-only path when the caller wants to pin/get the page for
2858 * writing. If the caller only wants to read the page, KVM must go
2859 * down the full, slow path in order to avoid racing an operation that
2860 * breaks Copy-on-Write (CoW), e.g. so that KVM doesn't end up pointing
2861 * at the old, read-only page while mm/ points at a new, writable page.
2862 */
2863 if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
2864 return false;
2865
2866 if (kfp->pin)
2867 r = pin_user_pages_fast(kfp->hva, 1, FOLL_WRITE, &page) == 1;
2868 else
2869 r = get_user_page_fast_only(kfp->hva, FOLL_WRITE, &page);
2870
2871 if (r) {
2872 *pfn = kvm_resolve_pfn(kfp, page, NULL, true);
2873 return true;
2874 }
2875
2876 return false;
2877 }
2878
2879 /*
2880 * The slow path to get the pfn of the specified host virtual address.
2881 * Returns 1 on success, or -errno if an error is detected.
2882 */
2883 static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2884 {
2885 /*
2886 * When a VCPU accesses a page that is not mapped into the secondary
2887 * MMU, we look up the page using GUP to map it, so the guest VCPU can
2888 * make progress. We always want to honor NUMA hinting faults in that
2889 * case, because GUP usage corresponds to memory accesses from the VCPU.
2890 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2891 * mapped into the secondary MMU and gets accessed by a VCPU.
2892 *
2893 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2894 * implicitly honor NUMA hinting faults and don't need this flag.
2895 */
2896 unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
2897 struct page *page, *wpage;
2898 int npages;
2899
2900 if (kfp->pin)
2901 npages = pin_user_pages_unlocked(kfp->hva, 1, &page, flags);
2902 else
2903 npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
2904 if (npages != 1)
2905 return npages;
2906
2907 /*
2908 * Pinning is mutually exclusive with opportunistically mapping a read
2909 * fault as writable, as KVM should never pin pages when mapping memory
2910 * into the guest (pinning is only for direct accesses from KVM).
2911 */
2912 if (WARN_ON_ONCE(kfp->map_writable && kfp->pin))
2913 goto out;
2914
2915 /* map read fault as writable if possible */
2916 if (!(flags & FOLL_WRITE) && kfp->map_writable &&
2917 get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
2918 put_page(page);
2919 page = wpage;
2920 flags |= FOLL_WRITE;
2921 }
2922
2923 out:
2924 *pfn = kvm_resolve_pfn(kfp, page, NULL, flags & FOLL_WRITE);
2925 return npages;
2926 }
2927
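/*
 * Check that @vma allows the access being handled: VM_READ is always
 * required, and VM_WRITE is also required for write faults.
 */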
2928 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2929 {
2930 if (unlikely(!(vma->vm_flags & VM_READ)))
2931 return false;
2932
2933 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2934 return false;
2935
2936 return true;
2937 }
2938
2939 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2940 struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
2941 {
2942 struct follow_pfnmap_args args = { .vma = vma, .address = kfp->hva };
2943 bool write_fault = kfp->flags & FOLL_WRITE;
2944 int r;
2945
2946 /*
2947 * Remapped memory cannot be pinned in any meaningful sense. Bail if
2948 * the caller wants to pin the page, i.e. access the page outside of
2949 * MMU notifier protection, and unsafe mappings are disallowed.
2950 */
2951 if (kfp->pin && !allow_unsafe_mappings)
2952 return -EINVAL;
2953
2954 r = follow_pfnmap_start(&args);
2955 if (r) {
2956 /*
2957 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2958 * not call the fault handler, so do it here.
2959 */
2960 bool unlocked = false;
2961 r = fixup_user_fault(current->mm, kfp->hva,
2962 (write_fault ? FAULT_FLAG_WRITE : 0),
2963 &unlocked);
2964 if (unlocked)
2965 return -EAGAIN;
2966 if (r)
2967 return r;
2968
2969 r = follow_pfnmap_start(&args);
2970 if (r)
2971 return r;
2972 }
2973
2974 if (write_fault && !args.writable) {
2975 *p_pfn = KVM_PFN_ERR_RO_FAULT;
2976 goto out;
2977 }
2978
2979 *p_pfn = kvm_resolve_pfn(kfp, NULL, &args, args.writable);
2980 out:
2981 follow_pfnmap_end(&args);
2982 return r;
2983 }
2984
2985 kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
2986 {
2987 struct vm_area_struct *vma;
2988 kvm_pfn_t pfn;
2989 int npages, r;
2990
2991 might_sleep();
2992
2993 if (WARN_ON_ONCE(!kfp->refcounted_page))
2994 return KVM_PFN_ERR_FAULT;
2995
2996 if (hva_to_pfn_fast(kfp, &pfn))
2997 return pfn;
2998
2999 npages = hva_to_pfn_slow(kfp, &pfn);
3000 if (npages == 1)
3001 return pfn;
3002 if (npages == -EINTR || npages == -EAGAIN)
3003 return KVM_PFN_ERR_SIGPENDING;
3004 if (npages == -EHWPOISON)
3005 return KVM_PFN_ERR_HWPOISON;
3006
3007 mmap_read_lock(current->mm);
3008 retry:
3009 vma = vma_lookup(current->mm, kfp->hva);
3010
3011 if (vma == NULL)
3012 pfn = KVM_PFN_ERR_FAULT;
3013 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
3014 r = hva_to_pfn_remapped(vma, kfp, &pfn);
3015 if (r == -EAGAIN)
3016 goto retry;
3017 if (r < 0)
3018 pfn = KVM_PFN_ERR_FAULT;
3019 } else {
3020 if ((kfp->flags & FOLL_NOWAIT) &&
3021 vma_is_valid(vma, kfp->flags & FOLL_WRITE))
3022 pfn = KVM_PFN_ERR_NEEDS_IO;
3023 else
3024 pfn = KVM_PFN_ERR_FAULT;
3025 }
3026 mmap_read_unlock(current->mm);
3027 return pfn;
3028 }
3029
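/*
 * Translate the gfn in @kfp to a host pfn: gfn=>hva via the memslot, then
 * hva=>pfn via hva_to_pfn().  Writes through a read-only memslot fail with
 * KVM_PFN_ERR_RO_FAULT, and map_writable is forced to false for read-only
 * memslots so that read faults are never opportunistically mapped writable.
 */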
3030 static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
3031 {
3032 kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL,
3033 kfp->flags & FOLL_WRITE);
3034
3035 if (kfp->hva == KVM_HVA_ERR_RO_BAD)
3036 return KVM_PFN_ERR_RO_FAULT;
3037
3038 if (kvm_is_error_hva(kfp->hva))
3039 return KVM_PFN_NOSLOT;
3040
3041 if (memslot_is_readonly(kfp->slot) && kfp->map_writable) {
3042 *kfp->map_writable = false;
3043 kfp->map_writable = NULL;
3044 }
3045
3046 return hva_to_pfn(kfp);
3047 }
3048
3049 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
3050 unsigned int foll, bool *writable,
3051 struct page **refcounted_page)
3052 {
3053 struct kvm_follow_pfn kfp = {
3054 .slot = slot,
3055 .gfn = gfn,
3056 .flags = foll,
3057 .map_writable = writable,
3058 .refcounted_page = refcounted_page,
3059 };
3060
3061 if (WARN_ON_ONCE(!writable || !refcounted_page))
3062 return KVM_PFN_ERR_FAULT;
3063
3064 *writable = false;
3065 *refcounted_page = NULL;
3066
3067 return kvm_follow_pfn(&kfp);
3068 }
3069 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_faultin_pfn);
3070
3071 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
3072 struct page **pages, int nr_pages)
3073 {
3074 unsigned long addr;
3075 gfn_t entry = 0;
3076
3077 addr = gfn_to_hva_many(slot, gfn, &entry);
3078 if (kvm_is_error_hva(addr))
3079 return -1;
3080
3081 if (entry < nr_pages)
3082 return 0;
3083
3084 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3085 }
3086 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prefetch_pages);
3087
3088 /*
3089 * Don't use this API unless you are absolutely, positively certain that KVM
3090 * needs to get a struct page, e.g. to pin the page for firmware DMA.
3091 *
3092 * FIXME: Users of this API likely need to FOLL_PIN the page, not just elevate
3093 * its refcount.
3094 */
3095 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write)
3096 {
3097 struct page *refcounted_page = NULL;
3098 struct kvm_follow_pfn kfp = {
3099 .slot = gfn_to_memslot(kvm, gfn),
3100 .gfn = gfn,
3101 .flags = write ? FOLL_WRITE : 0,
3102 .refcounted_page = &refcounted_page,
3103 };
3104
3105 (void)kvm_follow_pfn(&kfp);
3106 return refcounted_page;
3107 }
3108 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__gfn_to_page);
3109
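/*
 * Map a guest page for direct access by KVM.  The underlying page is pinned,
 * not just refcounted, because KVM may access it outside of MMU notifier
 * protection; the mapping must be torn down with kvm_vcpu_unmap().
 */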
3110 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
3111 bool writable)
3112 {
3113 struct kvm_follow_pfn kfp = {
3114 .slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn),
3115 .gfn = gfn,
3116 .flags = writable ? FOLL_WRITE : 0,
3117 .refcounted_page = &map->pinned_page,
3118 .pin = true,
3119 };
3120
3121 map->pinned_page = NULL;
3122 map->page = NULL;
3123 map->hva = NULL;
3124 map->gfn = gfn;
3125 map->writable = writable;
3126
3127 map->pfn = kvm_follow_pfn(&kfp);
3128 if (is_error_noslot_pfn(map->pfn))
3129 return -EINVAL;
3130
3131 if (pfn_valid(map->pfn)) {
3132 map->page = pfn_to_page(map->pfn);
3133 map->hva = kmap(map->page);
3134 #ifdef CONFIG_HAS_IOMEM
3135 } else {
3136 map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
3137 #endif
3138 }
3139
3140 return map->hva ? 0 : -EFAULT;
3141 }
3142 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_map);
3143
3144 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
3145 {
3146 if (!map->hva)
3147 return;
3148
3149 if (map->page)
3150 kunmap(map->page);
3151 #ifdef CONFIG_HAS_IOMEM
3152 else
3153 memunmap(map->hva);
3154 #endif
3155
3156 if (map->writable)
3157 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3158
3159 if (map->pinned_page) {
3160 if (map->writable)
3161 kvm_set_page_dirty(map->pinned_page);
3162 kvm_set_page_accessed(map->pinned_page);
3163 unpin_user_page(map->pinned_page);
3164 }
3165
3166 map->hva = NULL;
3167 map->page = NULL;
3168 map->pinned_page = NULL;
3169 }
3170 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_unmap);
3171
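/*
 * Return the number of bytes that can be copied starting at @offset within a
 * page without crossing a page boundary, capped at @len.
 */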
3172 static int next_segment(unsigned long len, int offset)
3173 {
3174 if (len > PAGE_SIZE - offset)
3175 return PAGE_SIZE - offset;
3176 else
3177 return len;
3178 }
3179
3180 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3181 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3182 void *data, int offset, int len)
3183 {
3184 int r;
3185 unsigned long addr;
3186
3187 if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3188 return -EFAULT;
3189
3190 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3191 if (kvm_is_error_hva(addr))
3192 return -EFAULT;
3193 r = __copy_from_user(data, (void __user *)addr + offset, len);
3194 if (r)
3195 return -EFAULT;
3196 return 0;
3197 }
3198
3199 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3200 int len)
3201 {
3202 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3203
3204 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3205 }
3206 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_page);
3207
3208 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3209 int offset, int len)
3210 {
3211 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3212
3213 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3214 }
3215 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_page);
3216
3217 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3218 {
3219 gfn_t gfn = gpa >> PAGE_SHIFT;
3220 int seg;
3221 int offset = offset_in_page(gpa);
3222 int ret;
3223
3224 while ((seg = next_segment(len, offset)) != 0) {
3225 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3226 if (ret < 0)
3227 return ret;
3228 offset = 0;
3229 len -= seg;
3230 data += seg;
3231 ++gfn;
3232 }
3233 return 0;
3234 }
3235 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest);
3236
3237 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3238 {
3239 gfn_t gfn = gpa >> PAGE_SHIFT;
3240 int seg;
3241 int offset = offset_in_page(gpa);
3242 int ret;
3243
3244 while ((seg = next_segment(len, offset)) != 0) {
3245 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3246 if (ret < 0)
3247 return ret;
3248 offset = 0;
3249 len -= seg;
3250 data += seg;
3251 ++gfn;
3252 }
3253 return 0;
3254 }
3255 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest);
3256
3257 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3258 void *data, int offset, unsigned long len)
3259 {
3260 int r;
3261 unsigned long addr;
3262
3263 if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3264 return -EFAULT;
3265
3266 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3267 if (kvm_is_error_hva(addr))
3268 return -EFAULT;
3269 pagefault_disable();
3270 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3271 pagefault_enable();
3272 if (r)
3273 return -EFAULT;
3274 return 0;
3275 }
3276
3277 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3278 void *data, unsigned long len)
3279 {
3280 gfn_t gfn = gpa >> PAGE_SHIFT;
3281 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3282 int offset = offset_in_page(gpa);
3283
3284 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3285 }
3286 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_atomic);
3287
3288 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3289 static int __kvm_write_guest_page(struct kvm *kvm,
3290 struct kvm_memory_slot *memslot, gfn_t gfn,
3291 const void *data, int offset, int len)
3292 {
3293 int r;
3294 unsigned long addr;
3295
3296 if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3297 return -EFAULT;
3298
3299 addr = gfn_to_hva_memslot(memslot, gfn);
3300 if (kvm_is_error_hva(addr))
3301 return -EFAULT;
3302 r = __copy_to_user((void __user *)addr + offset, data, len);
3303 if (r)
3304 return -EFAULT;
3305 mark_page_dirty_in_slot(kvm, memslot, gfn);
3306 return 0;
3307 }
3308
3309 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3310 const void *data, int offset, int len)
3311 {
3312 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3313
3314 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3315 }
3316 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_page);
3317
3318 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3319 const void *data, int offset, int len)
3320 {
3321 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3322
3323 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3324 }
3325 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest_page);
3326
3327 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3328 unsigned long len)
3329 {
3330 gfn_t gfn = gpa >> PAGE_SHIFT;
3331 int seg;
3332 int offset = offset_in_page(gpa);
3333 int ret;
3334
3335 while ((seg = next_segment(len, offset)) != 0) {
3336 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3337 if (ret < 0)
3338 return ret;
3339 offset = 0;
3340 len -= seg;
3341 data += seg;
3342 ++gfn;
3343 }
3344 return 0;
3345 }
3346 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest);
3347
3348 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3349 unsigned long len)
3350 {
3351 gfn_t gfn = gpa >> PAGE_SHIFT;
3352 int seg;
3353 int offset = offset_in_page(gpa);
3354 int ret;
3355
3356 while ((seg = next_segment(len, offset)) != 0) {
3357 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3358 if (ret < 0)
3359 return ret;
3360 offset = 0;
3361 len -= seg;
3362 data += seg;
3363 ++gfn;
3364 }
3365 return 0;
3366 }
3367 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest);
3368
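/*
 * Initialize a gfn_to_hva_cache: record the memslot generation and the
 * gpa->hva translation so cached accessors can skip the memslot lookup.
 * Regions spanning more than one page leave ghc->memslot == NULL, which
 * forces the cached read/write helpers onto the slow, uncached path.
 */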
3369 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3370 struct gfn_to_hva_cache *ghc,
3371 gpa_t gpa, unsigned long len)
3372 {
3373 int offset = offset_in_page(gpa);
3374 gfn_t start_gfn = gpa >> PAGE_SHIFT;
3375 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3376 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3377 gfn_t nr_pages_avail;
3378
3379 /* Update ghc->generation before performing any error checks. */
3380 ghc->generation = slots->generation;
3381
3382 if (start_gfn > end_gfn) {
3383 ghc->hva = KVM_HVA_ERR_BAD;
3384 return -EINVAL;
3385 }
3386
3387 /*
3388 * If the requested region crosses two memslots, we still
3389 * verify that the entire region is valid here.
3390 */
3391 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3392 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3393 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3394 &nr_pages_avail);
3395 if (kvm_is_error_hva(ghc->hva))
3396 return -EFAULT;
3397 }
3398
3399 /* Use the slow path for cross page reads and writes. */
3400 if (nr_pages_needed == 1)
3401 ghc->hva += offset;
3402 else
3403 ghc->memslot = NULL;
3404
3405 ghc->gpa = gpa;
3406 ghc->len = len;
3407 return 0;
3408 }
3409
3410 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3411 gpa_t gpa, unsigned long len)
3412 {
3413 struct kvm_memslots *slots = kvm_memslots(kvm);
3414 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3415 }
3416 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gfn_to_hva_cache_init);
3417
3418 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3419 void *data, unsigned int offset,
3420 unsigned long len)
3421 {
3422 struct kvm_memslots *slots = kvm_memslots(kvm);
3423 int r;
3424 gpa_t gpa = ghc->gpa + offset;
3425
3426 if (WARN_ON_ONCE(len + offset > ghc->len))
3427 return -EINVAL;
3428
3429 if (slots->generation != ghc->generation) {
3430 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3431 return -EFAULT;
3432 }
3433
3434 if (kvm_is_error_hva(ghc->hva))
3435 return -EFAULT;
3436
3437 if (unlikely(!ghc->memslot))
3438 return kvm_write_guest(kvm, gpa, data, len);
3439
3440 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3441 if (r)
3442 return -EFAULT;
3443 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3444
3445 return 0;
3446 }
3447 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_offset_cached);
3448
3449 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3450 void *data, unsigned long len)
3451 {
3452 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3453 }
3454 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_cached);
3455
3456 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3457 void *data, unsigned int offset,
3458 unsigned long len)
3459 {
3460 struct kvm_memslots *slots = kvm_memslots(kvm);
3461 int r;
3462 gpa_t gpa = ghc->gpa + offset;
3463
3464 if (WARN_ON_ONCE(len + offset > ghc->len))
3465 return -EINVAL;
3466
3467 if (slots->generation != ghc->generation) {
3468 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3469 return -EFAULT;
3470 }
3471
3472 if (kvm_is_error_hva(ghc->hva))
3473 return -EFAULT;
3474
3475 if (unlikely(!ghc->memslot))
3476 return kvm_read_guest(kvm, gpa, data, len);
3477
3478 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3479 if (r)
3480 return -EFAULT;
3481
3482 return 0;
3483 }
3484 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_offset_cached);
3485
3486 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3487 void *data, unsigned long len)
3488 {
3489 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3490 }
3491 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_cached);
3492
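/* Zero @len bytes of guest memory at @gpa, one page-sized segment at a time. */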
3493 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3494 {
3495 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3496 gfn_t gfn = gpa >> PAGE_SHIFT;
3497 int seg;
3498 int offset = offset_in_page(gpa);
3499 int ret;
3500
3501 while ((seg = next_segment(len, offset)) != 0) {
3502 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3503 if (ret < 0)
3504 return ret;
3505 offset = 0;
3506 len -= seg;
3507 ++gfn;
3508 }
3509 return 0;
3510 }
3511 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_clear_guest);
3512
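/*
 * Record that @gfn in @memslot was written: push it onto the running vCPU's
 * dirty ring when dirty-ring logging is in use, otherwise set the
 * corresponding bit in the memslot's dirty bitmap.
 */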
3513 void mark_page_dirty_in_slot(struct kvm *kvm,
3514 const struct kvm_memory_slot *memslot,
3515 gfn_t gfn)
3516 {
3517 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3518
3519 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3520 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3521 return;
3522
3523 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3524 #endif
3525
3526 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3527 unsigned long rel_gfn = gfn - memslot->base_gfn;
3528 u32 slot = (memslot->as_id << 16) | memslot->id;
3529
3530 if (kvm->dirty_ring_size && vcpu)
3531 kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3532 else if (memslot->dirty_bitmap)
3533 set_bit_le(rel_gfn, memslot->dirty_bitmap);
3534 }
3535 }
3536 EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty_in_slot);
3537
3538 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3539 {
3540 struct kvm_memory_slot *memslot;
3541
3542 memslot = gfn_to_memslot(kvm, gfn);
3543 mark_page_dirty_in_slot(kvm, memslot, gfn);
3544 }
3545 EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty);
3546
3547 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3548 {
3549 struct kvm_memory_slot *memslot;
3550
3551 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3552 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3553 }
3554 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_mark_page_dirty);
3555
3556 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3557 {
3558 if (!vcpu->sigset_active)
3559 return;
3560
3561 /*
3562 * This does a lockless modification of ->real_blocked, which is fine
3563 * because only current can change ->real_blocked and all readers of
3564 * ->real_blocked don't care as long as ->real_blocked is always a subset
3565 * of ->blocked.
3566 */
3567 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3568 }
3569
3570 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3571 {
3572 if (!vcpu->sigset_active)
3573 return;
3574
3575 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3576 sigemptyset(&current->real_blocked);
3577 }
3578
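/*
 * Grow the vCPU's halt-polling window by the halt_poll_ns_grow factor
 * (a factor of 0 disables growing), starting no lower than
 * halt_poll_ns_grow_start.
 */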
3579 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3580 {
3581 unsigned int old, val, grow, grow_start;
3582
3583 old = val = vcpu->halt_poll_ns;
3584 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3585 grow = READ_ONCE(halt_poll_ns_grow);
3586 if (!grow)
3587 goto out;
3588
3589 val *= grow;
3590 if (val < grow_start)
3591 val = grow_start;
3592
3593 vcpu->halt_poll_ns = val;
3594 out:
3595 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3596 }
3597
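/*
 * Shrink the vCPU's halt-polling window by the halt_poll_ns_shrink divisor,
 * resetting it to 0 when shrinking is disabled or the result would drop
 * below halt_poll_ns_grow_start.
 */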
3598 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3599 {
3600 unsigned int old, val, shrink, grow_start;
3601
3602 old = val = vcpu->halt_poll_ns;
3603 shrink = READ_ONCE(halt_poll_ns_shrink);
3604 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3605 if (shrink == 0)
3606 val = 0;
3607 else
3608 val /= shrink;
3609
3610 if (val < grow_start)
3611 val = 0;
3612
3613 vcpu->halt_poll_ns = val;
3614 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3615 }
3616
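/*
 * Check whether a blocking vCPU should wake up: returns 0 to keep waiting,
 * or -EINTR if the vCPU is runnable, has a pending timer or signal, or has
 * a KVM_REQ_UNBLOCK request pending.
 */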
3617 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3618 {
3619 int ret = -EINTR;
3620 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3621
3622 if (kvm_arch_vcpu_runnable(vcpu))
3623 goto out;
3624 if (kvm_cpu_has_pending_timer(vcpu))
3625 goto out;
3626 if (signal_pending(current))
3627 goto out;
3628 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3629 goto out;
3630
3631 ret = 0;
3632 out:
3633 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3634 return ret;
3635 }
3636
3637 /*
3638 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3639 * pending. This is mostly used when halting a vCPU, but may also be used
3640 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3641 */
3642 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3643 {
3644 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3645 bool waited = false;
3646
3647 vcpu->stat.generic.blocking = 1;
3648
3649 preempt_disable();
3650 kvm_arch_vcpu_blocking(vcpu);
3651 prepare_to_rcuwait(wait);
3652 preempt_enable();
3653
3654 for (;;) {
3655 set_current_state(TASK_INTERRUPTIBLE);
3656
3657 if (kvm_vcpu_check_block(vcpu) < 0)
3658 break;
3659
3660 waited = true;
3661 schedule();
3662 }
3663
3664 preempt_disable();
3665 finish_rcuwait(wait);
3666 kvm_arch_vcpu_unblocking(vcpu);
3667 preempt_enable();
3668
3669 vcpu->stat.generic.blocking = 0;
3670
3671 return waited;
3672 }
3673
3674 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3675 ktime_t end, bool success)
3676 {
3677 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3678 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3679
3680 ++vcpu->stat.generic.halt_attempted_poll;
3681
3682 if (success) {
3683 ++vcpu->stat.generic.halt_successful_poll;
3684
3685 if (!vcpu_valid_wakeup(vcpu))
3686 ++vcpu->stat.generic.halt_poll_invalid;
3687
3688 stats->halt_poll_success_ns += poll_ns;
3689 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3690 } else {
3691 stats->halt_poll_fail_ns += poll_ns;
3692 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3693 }
3694 }
3695
3696 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3697 {
3698 struct kvm *kvm = vcpu->kvm;
3699
3700 if (kvm->override_halt_poll_ns) {
3701 /*
3702 * Ensure kvm->max_halt_poll_ns is not read before
3703 * kvm->override_halt_poll_ns.
3704 *
3705 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3706 */
3707 smp_rmb();
3708 return READ_ONCE(kvm->max_halt_poll_ns);
3709 }
3710
3711 return READ_ONCE(halt_poll_ns);
3712 }
3713
3714 /*
3715 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
3716 * polling is enabled, busy wait for a short time before blocking to avoid the
3717 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3718 * is halted.
3719 */
3720 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3721 {
3722 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3723 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3724 ktime_t start, cur, poll_end;
3725 bool waited = false;
3726 bool do_halt_poll;
3727 u64 halt_ns;
3728
3729 if (vcpu->halt_poll_ns > max_halt_poll_ns)
3730 vcpu->halt_poll_ns = max_halt_poll_ns;
3731
3732 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3733
3734 start = cur = poll_end = ktime_get();
3735 if (do_halt_poll) {
3736 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3737
3738 do {
3739 if (kvm_vcpu_check_block(vcpu) < 0)
3740 goto out;
3741 cpu_relax();
3742 poll_end = cur = ktime_get();
3743 } while (kvm_vcpu_can_poll(cur, stop));
3744 }
3745
3746 waited = kvm_vcpu_block(vcpu);
3747
3748 cur = ktime_get();
3749 if (waited) {
3750 vcpu->stat.generic.halt_wait_ns +=
3751 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3752 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3753 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3754 }
3755 out:
3756 /* The total time the vCPU was "halted", including polling time. */
3757 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3758
3759 /*
3760 * Note, halt-polling is considered successful so long as the vCPU was
3761 * never actually scheduled out, i.e. even if the wake event arrived
3762 * after the halt-polling loop itself, but before the full wait.
3763 */
3764 if (do_halt_poll)
3765 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3766
3767 if (halt_poll_allowed) {
3768 /* Recompute the max halt poll time in case it changed. */
3769 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3770
3771 if (!vcpu_valid_wakeup(vcpu)) {
3772 shrink_halt_poll_ns(vcpu);
3773 } else if (max_halt_poll_ns) {
3774 if (halt_ns <= vcpu->halt_poll_ns)
3775 ;
3776 /* we had a long block, shrink polling */
3777 else if (vcpu->halt_poll_ns &&
3778 halt_ns > max_halt_poll_ns)
3779 shrink_halt_poll_ns(vcpu);
3780 /* we had a short halt and our poll time is too small */
3781 else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3782 halt_ns < max_halt_poll_ns)
3783 grow_halt_poll_ns(vcpu);
3784 } else {
3785 vcpu->halt_poll_ns = 0;
3786 }
3787 }
3788
3789 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3790 }
3791 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);
3792
3793 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3794 {
3795 if (__kvm_vcpu_wake_up(vcpu)) {
3796 WRITE_ONCE(vcpu->ready, true);
3797 ++vcpu->stat.generic.halt_wakeup;
3798 return true;
3799 }
3800
3801 return false;
3802 }
3803 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_wake_up);
3804
3805 #ifndef CONFIG_S390
3806 /*
3807 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3808 */
3809 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
3810 {
3811 int me, cpu;
3812
3813 if (kvm_vcpu_wake_up(vcpu))
3814 return;
3815
3816 me = get_cpu();
3817 /*
3818 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3819 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should
3820 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3821 * within the vCPU thread itself.
3822 */
3823 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3824 if (vcpu->mode == IN_GUEST_MODE)
3825 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3826 goto out;
3827 }
3828
3829 /*
3830 * Note, the vCPU could get migrated to a different pCPU at any point
3831 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3832 * IPI to the previous pCPU. But, that's ok because the purpose of the
3833 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3834 * vCPU also requires it to leave IN_GUEST_MODE.
3835 */
3836 if (kvm_arch_vcpu_should_kick(vcpu)) {
3837 cpu = READ_ONCE(vcpu->cpu);
3838 if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
3839 /*
3840 * Use a reschedule IPI to kick the vCPU if the caller
3841 * doesn't need to wait for a response, as KVM allows
3842 * kicking vCPUs while IRQs are disabled, but using the
3843 * SMP function call framework with IRQs disabled can
3844 * deadlock due to taking cross-CPU locks.
3845 */
3846 if (wait)
3847 smp_call_function_single(cpu, ack_kick, NULL, wait);
3848 else
3849 smp_send_reschedule(cpu);
3850 }
3851 }
3852 out:
3853 put_cpu();
3854 }
3855 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_kick);
3856 #endif /* !CONFIG_S390 */
3857
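/*
 * Directed yield to the task currently backing the target vCPU.  Returns 0
 * if the target's pid can't be resolved (or its pid lock is contended),
 * otherwise the result of yield_to().
 */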
3858 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3859 {
3860 struct task_struct *task = NULL;
3861 int ret;
3862
3863 if (!read_trylock(&target->pid_lock))
3864 return 0;
3865
3866 if (target->pid)
3867 task = get_pid_task(target->pid, PIDTYPE_PID);
3868
3869 read_unlock(&target->pid_lock);
3870
3871 if (!task)
3872 return 0;
3873 ret = yield_to(task, 1);
3874 put_task_struct(task);
3875
3876 return ret;
3877 }
3878 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_yield_to);
3879
3880 /*
3881 * Helper that checks whether a VCPU is eligible for directed yield.
3882 * Most eligible candidate to yield is decided by following heuristics:
3883 *
3884 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3885 * (preempted lock holder), indicated by @in_spin_loop.
3886 * Set at the beginning and cleared at the end of interception/PLE handler.
3887 *
3888 * (b) VCPU which has done a pl-exit/cpu relax intercept but did not get a
3889 * chance last time (mostly it has become eligible now since we have probably
3890 * yielded to the lock holder in the last iteration. This is done by toggling
3891 * @dy_eligible each time a VCPU checked for eligibility.)
3892 *
3893 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3894 * to preempted lock-holder could result in wrong VCPU selection and CPU
3895 * burning. Giving priority for a potential lock-holder increases lock
3896 * progress.
3897 *
3898 * Since the algorithm is based on heuristics, accessing another VCPU's data
3899 * without locking does no harm. It may result in trying to yield to the same
3900 * VCPU, failing, and continuing with the next VCPU, and so on.
3901 */
3902 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3903 {
3904 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3905 bool eligible;
3906
3907 eligible = !vcpu->spin_loop.in_spin_loop ||
3908 vcpu->spin_loop.dy_eligible;
3909
3910 if (vcpu->spin_loop.in_spin_loop)
3911 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3912
3913 return eligible;
3914 #else
3915 return true;
3916 #endif
3917 }
3918
3919 /*
3920 * Unlike kvm_arch_vcpu_runnable, this function is called outside
3921 * a vcpu_load/vcpu_put pair. However, for most architectures
3922 * kvm_arch_vcpu_runnable does not require vcpu_load.
3923 */
3924 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3925 {
3926 return kvm_arch_vcpu_runnable(vcpu);
3927 }
3928
3929 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3930 {
3931 if (kvm_arch_dy_runnable(vcpu))
3932 return true;
3933
3934 #ifdef CONFIG_KVM_ASYNC_PF
3935 if (!list_empty_careful(&vcpu->async_pf.done))
3936 return true;
3937 #endif
3938
3939 return false;
3940 }
3941
3942 /*
3943 * By default, simply query the target vCPU's current mode when checking if a
3944 * vCPU was preempted in kernel mode. All architectures except x86 (or more
3945 * specifically, except for VMX) allow querying whether or not a vCPU is in
3946 * kernel mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
3947 * directly for cross-vCPU checks is functionally correct and accurate.
3948 */
3949 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
3950 {
3951 return kvm_arch_vcpu_in_kernel(vcpu);
3952 }
3953
3954 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3955 {
3956 return false;
3957 }
3958
3959 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3960 {
3961 int nr_vcpus, start, i, idx, yielded;
3962 struct kvm *kvm = me->kvm;
3963 struct kvm_vcpu *vcpu;
3964 int try = 3;
3965
3966 nr_vcpus = atomic_read(&kvm->online_vcpus);
3967 if (nr_vcpus < 2)
3968 return;
3969
3970 /* Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(). */
3971 smp_rmb();
3972
3973 kvm_vcpu_set_in_spin_loop(me, true);
3974
3975 /*
3976 * The current vCPU ("me") is spinning in kernel mode, i.e. is likely
3977 * waiting for a resource to become available. Attempt to yield to a
3978 * vCPU that is runnable, but not currently running, e.g. because the
3979 * vCPU was preempted by a higher priority task. With luck, the vCPU
3980 * that was preempted is holding a lock or some other resource that the
3981 * current vCPU is waiting to acquire, and yielding to the other vCPU
3982 * will allow it to make forward progress and release the lock (or kick
3983 * the spinning vCPU, etc).
3984 *
3985 * Since KVM has no insight into what exactly the guest is doing,
3986 * approximate a round-robin selection by iterating over all vCPUs,
3987 * starting at the last boosted vCPU. I.e. if N=kvm->last_boosted_vcpu,
3988 * iterate over vCPU[N+1]..vCPU[N-1], wrapping as needed.
3989 *
3990 * Note, this is inherently racy, e.g. if multiple vCPUs are spinning,
3991 * they may all try to yield to the same vCPU(s). But as above, this
3992 * is all best effort due to KVM's lack of visibility into the guest.
3993 */
3994 start = READ_ONCE(kvm->last_boosted_vcpu) + 1;
3995 for (i = 0; i < nr_vcpus; i++) {
3996 idx = (start + i) % nr_vcpus;
3997 if (idx == me->vcpu_idx)
3998 continue;
3999
4000 vcpu = xa_load(&kvm->vcpu_array, idx);
4001 if (!READ_ONCE(vcpu->ready))
4002 continue;
4003 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4004 continue;
4005
4006 /*
4007 * Treat the target vCPU as being in-kernel if it has a pending
4008 * interrupt, as the vCPU trying to yield may be spinning
4009 * waiting on IPI delivery, i.e. the target vCPU is in-kernel
4010 * for the purposes of directed yield.
4011 */
4012 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4013 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4014 !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4015 continue;
4016
4017 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4018 continue;
4019
4020 yielded = kvm_vcpu_yield_to(vcpu);
4021 if (yielded > 0) {
4022 WRITE_ONCE(kvm->last_boosted_vcpu, idx);
4023 break;
4024 } else if (yielded < 0 && !--try) {
4025 break;
4026 }
4027 }
4028 kvm_vcpu_set_in_spin_loop(me, false);
4029
4030 /* Ensure vcpu is not eligible during next spinloop */
4031 kvm_vcpu_set_dy_eligible(me, false);
4032 }
4033 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_on_spin);
4034
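/*
 * Return true if @pgoff falls in the portion of the vCPU mmap space that is
 * reserved for the dirty ring (KVM_DIRTY_LOG_PAGE_OFFSET onwards).
 */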
4035 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4036 {
4037 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4038 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4039 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4040 kvm->dirty_ring_size / PAGE_SIZE);
4041 #else
4042 return false;
4043 #endif
4044 }
4045
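/*
 * Fault handler for the vCPU mmap region: hand back the kvm_run page, the
 * x86 PIO data page, the coalesced MMIO ring, or a dirty-ring page based on
 * the page offset, deferring anything else to the architecture.
 */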
4046 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4047 {
4048 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4049 struct page *page;
4050
4051 if (vmf->pgoff == 0)
4052 page = virt_to_page(vcpu->run);
4053 #ifdef CONFIG_X86
4054 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4055 page = virt_to_page(vcpu->arch.pio_data);
4056 #endif
4057 #ifdef CONFIG_KVM_MMIO
4058 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4059 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4060 #endif
4061 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4062 page = kvm_dirty_ring_get_page(
4063 &vcpu->dirty_ring,
4064 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4065 else
4066 return kvm_arch_vcpu_fault(vcpu, vmf);
4067 get_page(page);
4068 vmf->page = page;
4069 return 0;
4070 }
4071
4072 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4073 .fault = kvm_vcpu_fault,
4074 };
4075
4076 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4077 {
4078 struct kvm_vcpu *vcpu = file->private_data;
4079 unsigned long pages = vma_pages(vma);
4080
4081 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4082 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4083 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4084 return -EINVAL;
4085
4086 vma->vm_ops = &kvm_vcpu_vm_ops;
4087 return 0;
4088 }
4089
4090 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4091 {
4092 struct kvm_vcpu *vcpu = filp->private_data;
4093
4094 kvm_put_kvm(vcpu->kvm);
4095 return 0;
4096 }
4097
4098 static struct file_operations kvm_vcpu_fops = {
4099 .release = kvm_vcpu_release,
4100 .unlocked_ioctl = kvm_vcpu_ioctl,
4101 .mmap = kvm_vcpu_mmap,
4102 .llseek = noop_llseek,
4103 KVM_COMPAT(kvm_vcpu_compat_ioctl),
4104 };
4105
4106 /*
4107 * Allocates an inode for the vcpu.
4108 */
4109 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4110 {
4111 char name[8 + 1 + ITOA_MAX_LEN + 1];
4112
4113 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4114 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4115 }
4116
4117 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4118 static int vcpu_get_pid(void *data, u64 *val)
4119 {
4120 struct kvm_vcpu *vcpu = data;
4121
4122 read_lock(&vcpu->pid_lock);
4123 *val = pid_nr(vcpu->pid);
4124 read_unlock(&vcpu->pid_lock);
4125 return 0;
4126 }
4127
4128 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4129
4130 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4131 {
4132 struct dentry *debugfs_dentry;
4133 char dir_name[ITOA_MAX_LEN * 2];
4134
4135 if (!debugfs_initialized())
4136 return;
4137
4138 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4139 debugfs_dentry = debugfs_create_dir(dir_name,
4140 vcpu->kvm->debugfs_dentry);
4141 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4142 &vcpu_get_pid_fops);
4143
4144 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4145 }
4146 #endif
4147
4148 /*
4149 * Creates some virtual cpus. Good luck creating more than one.
4150 */
4151 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
4152 {
4153 int r;
4154 struct kvm_vcpu *vcpu;
4155 struct page *page;
4156
4157 /*
4158 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
4159 * too-large values instead of silently truncating.
4160 *
4161 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
4162 * changing the storage type (at the very least, IDs should be tracked
4163 * as unsigned ints).
4164 */
4165 BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
4166 if (id >= KVM_MAX_VCPU_IDS)
4167 return -EINVAL;
4168
4169 mutex_lock(&kvm->lock);
4170 if (kvm->created_vcpus >= kvm->max_vcpus) {
4171 mutex_unlock(&kvm->lock);
4172 return -EINVAL;
4173 }
4174
4175 r = kvm_arch_vcpu_precreate(kvm, id);
4176 if (r) {
4177 mutex_unlock(&kvm->lock);
4178 return r;
4179 }
4180
4181 kvm->created_vcpus++;
4182 mutex_unlock(&kvm->lock);
4183
4184 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4185 if (!vcpu) {
4186 r = -ENOMEM;
4187 goto vcpu_decrement;
4188 }
4189
4190 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4191 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4192 if (!page) {
4193 r = -ENOMEM;
4194 goto vcpu_free;
4195 }
4196 vcpu->run = page_address(page);
4197
4198 kvm_vcpu_init(vcpu, kvm, id);
4199
4200 r = kvm_arch_vcpu_create(vcpu);
4201 if (r)
4202 goto vcpu_free_run_page;
4203
4204 if (kvm->dirty_ring_size) {
4205 r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring,
4206 id, kvm->dirty_ring_size);
4207 if (r)
4208 goto arch_vcpu_destroy;
4209 }
4210
4211 mutex_lock(&kvm->lock);
4212
4213 if (kvm_get_vcpu_by_id(kvm, id)) {
4214 r = -EEXIST;
4215 goto unlock_vcpu_destroy;
4216 }
4217
4218 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4219 r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
4220 WARN_ON_ONCE(r == -EBUSY);
4221 if (r)
4222 goto unlock_vcpu_destroy;
4223
4224 /*
4225 * Now it's all set up, let userspace reach it. Grab the vCPU's mutex
4226 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
4227 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
4228 * into a NULL-pointer dereference because KVM thinks the _current_
4229 * vCPU doesn't exist. As a bonus, taking vcpu->mutex ensures lockdep
4230 * knows it's taken *inside* kvm->lock.
4231 */
4232 mutex_lock(&vcpu->mutex);
4233 kvm_get_kvm(kvm);
4234 r = create_vcpu_fd(vcpu);
4235 if (r < 0)
4236 goto kvm_put_xa_erase;
4237
4238 /*
4239 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
4240 * pointer before kvm->online_vcpu's incremented value.
4241 */
4242 smp_wmb();
4243 atomic_inc(&kvm->online_vcpus);
4244 mutex_unlock(&vcpu->mutex);
4245
4246 mutex_unlock(&kvm->lock);
4247 kvm_arch_vcpu_postcreate(vcpu);
4248 kvm_create_vcpu_debugfs(vcpu);
4249 return r;
4250
4251 kvm_put_xa_erase:
4252 mutex_unlock(&vcpu->mutex);
4253 kvm_put_kvm_no_destroy(kvm);
4254 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
4255 unlock_vcpu_destroy:
4256 mutex_unlock(&kvm->lock);
4257 kvm_dirty_ring_free(&vcpu->dirty_ring);
4258 arch_vcpu_destroy:
4259 kvm_arch_vcpu_destroy(vcpu);
4260 vcpu_free_run_page:
4261 free_page((unsigned long)vcpu->run);
4262 vcpu_free:
4263 kmem_cache_free(kvm_vcpu_cache, vcpu);
4264 vcpu_decrement:
4265 mutex_lock(&kvm->lock);
4266 kvm->created_vcpus--;
4267 mutex_unlock(&kvm->lock);
4268 return r;
4269 }
4270
4271 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4272 {
4273 if (sigset) {
4274 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4275 vcpu->sigset_active = 1;
4276 vcpu->sigset = *sigset;
4277 } else
4278 vcpu->sigset_active = 0;
4279 return 0;
4280 }
4281
4282 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4283 size_t size, loff_t *offset)
4284 {
4285 struct kvm_vcpu *vcpu = file->private_data;
4286
4287 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4288 &kvm_vcpu_stats_desc[0], &vcpu->stat,
4289 sizeof(vcpu->stat), user_buffer, size, offset);
4290 }
4291
4292 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4293 {
4294 struct kvm_vcpu *vcpu = file->private_data;
4295
4296 kvm_put_kvm(vcpu->kvm);
4297 return 0;
4298 }
4299
4300 static const struct file_operations kvm_vcpu_stats_fops = {
4301 .owner = THIS_MODULE,
4302 .read = kvm_vcpu_stats_read,
4303 .release = kvm_vcpu_stats_release,
4304 .llseek = noop_llseek,
4305 };
4306
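/*
 * Create a read-only, pread-capable anonymous fd exposing this vCPU's binary
 * stats; the fd holds a reference on the VM until it is released.
 */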
4307 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4308 {
4309 int fd;
4310 struct file *file;
4311 char name[15 + ITOA_MAX_LEN + 1];
4312
4313 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4314
4315 fd = get_unused_fd_flags(O_CLOEXEC);
4316 if (fd < 0)
4317 return fd;
4318
4319 file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu,
4320 O_RDONLY, FMODE_PREAD);
4321 if (IS_ERR(file)) {
4322 put_unused_fd(fd);
4323 return PTR_ERR(file);
4324 }
4325
4326 kvm_get_kvm(vcpu->kvm);
4327 fd_install(fd, file);
4328
4329 return fd;
4330 }
4331
4332 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4333 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4334 struct kvm_pre_fault_memory *range)
4335 {
4336 int idx;
4337 long r;
4338 u64 full_size;
4339
4340 if (range->flags)
4341 return -EINVAL;
4342
4343 if (!PAGE_ALIGNED(range->gpa) ||
4344 !PAGE_ALIGNED(range->size) ||
4345 range->gpa + range->size <= range->gpa)
4346 return -EINVAL;
4347
4348 vcpu_load(vcpu);
4349 idx = srcu_read_lock(&vcpu->kvm->srcu);
4350
4351 full_size = range->size;
4352 do {
4353 if (signal_pending(current)) {
4354 r = -EINTR;
4355 break;
4356 }
4357
4358 r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
4359 if (WARN_ON_ONCE(r == 0 || r == -EIO))
4360 break;
4361
4362 if (r < 0)
4363 break;
4364
4365 range->size -= r;
4366 range->gpa += r;
4367 cond_resched();
4368 } while (range->size);
4369
4370 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4371 vcpu_put(vcpu);
4372
4373 /* Return success if at least one page was mapped successfully. */
4374 return full_size == range->size ? r : 0;
4375 }
4376 #endif
4377
4378 static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
4379 {
4380 struct kvm *kvm = vcpu->kvm;
4381
4382 /*
4383 * In practice, this happy path will always be taken, as a well-behaved
4384 * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns.
4385 */
4386 if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus)))
4387 return 0;
4388
4389 /*
4390 * Acquire and release the vCPU's mutex to wait for vCPU creation to
4391 * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU
4392 * is fully online).
4393 */
4394 if (mutex_lock_killable(&vcpu->mutex))
4395 return -EINTR;
4396
4397 mutex_unlock(&vcpu->mutex);
4398
4399 if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx)))
4400 return -EIO;
4401
4402 return 0;
4403 }
4404
4405 static long kvm_vcpu_ioctl(struct file *filp,
4406 unsigned int ioctl, unsigned long arg)
4407 {
4408 struct kvm_vcpu *vcpu = filp->private_data;
4409 void __user *argp = (void __user *)arg;
4410 int r;
4411 struct kvm_fpu *fpu = NULL;
4412 struct kvm_sregs *kvm_sregs = NULL;
4413
4414 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4415 return -EIO;
4416
4417 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4418 return -EINVAL;
4419
4420 /*
4421 * Wait for the vCPU to be online before handling the ioctl(), as KVM
4422 * assumes the vCPU is reachable via vcpu_array, i.e. may dereference
4423 * a NULL pointer if userspace invokes an ioctl() before KVM is ready.
4424 */
4425 r = kvm_wait_for_vcpu_online(vcpu);
4426 if (r)
4427 return r;
4428
4429 /*
4430 * Let arch code handle select vCPU ioctls without holding vcpu->mutex,
4431 * e.g. to support ioctls that can run asynchronous to vCPU execution.
4432 */
4433 r = kvm_arch_vcpu_unlocked_ioctl(filp, ioctl, arg);
4434 if (r != -ENOIOCTLCMD)
4435 return r;
4436
4437 if (mutex_lock_killable(&vcpu->mutex))
4438 return -EINTR;
4439 switch (ioctl) {
4440 case KVM_RUN: {
4441 struct pid *oldpid;
4442 r = -EINVAL;
4443 if (arg)
4444 goto out;
4445
4446 /*
4447 * Note, vcpu->pid is primarily protected by vcpu->mutex. The
4448 * dedicated r/w lock allows other tasks, e.g. other vCPUs, to
4449 * read vcpu->pid while this vCPU is in KVM_RUN, e.g. to yield
4450 * directly to this vCPU.
4451 */
4452 oldpid = vcpu->pid;
4453 if (unlikely(oldpid != task_pid(current))) {
4454 /* The thread running this VCPU changed. */
4455 struct pid *newpid;
4456
4457 r = kvm_arch_vcpu_run_pid_change(vcpu);
4458 if (r)
4459 break;
4460
4461 newpid = get_task_pid(current, PIDTYPE_PID);
4462 write_lock(&vcpu->pid_lock);
4463 vcpu->pid = newpid;
4464 write_unlock(&vcpu->pid_lock);
4465
4466 put_pid(oldpid);
4467 }
4468 vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
4469 r = kvm_arch_vcpu_ioctl_run(vcpu);
4470 vcpu->wants_to_run = false;
4471
4472 /*
4473 * FIXME: Remove this hack once all KVM architectures
4474 * support the generic TIF bits, i.e. a dedicated TIF_RSEQ.
4475 */
4476 rseq_virt_userspace_exit();
4477
4478 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4479 break;
4480 }
4481 case KVM_GET_REGS: {
4482 struct kvm_regs *kvm_regs;
4483
4484 r = -ENOMEM;
4485 kvm_regs = kzalloc_obj(struct kvm_regs);
4486 if (!kvm_regs)
4487 goto out;
4488 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4489 if (r)
4490 goto out_free1;
4491 r = -EFAULT;
4492 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4493 goto out_free1;
4494 r = 0;
4495 out_free1:
4496 kfree(kvm_regs);
4497 break;
4498 }
4499 case KVM_SET_REGS: {
4500 struct kvm_regs *kvm_regs;
4501
4502 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4503 if (IS_ERR(kvm_regs)) {
4504 r = PTR_ERR(kvm_regs);
4505 goto out;
4506 }
4507 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4508 kfree(kvm_regs);
4509 break;
4510 }
4511 case KVM_GET_SREGS: {
4512 kvm_sregs = kzalloc_obj(struct kvm_sregs);
4513 r = -ENOMEM;
4514 if (!kvm_sregs)
4515 goto out;
4516 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4517 if (r)
4518 goto out;
4519 r = -EFAULT;
4520 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4521 goto out;
4522 r = 0;
4523 break;
4524 }
4525 case KVM_SET_SREGS: {
4526 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4527 if (IS_ERR(kvm_sregs)) {
4528 r = PTR_ERR(kvm_sregs);
4529 kvm_sregs = NULL;
4530 goto out;
4531 }
4532 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4533 break;
4534 }
4535 case KVM_GET_MP_STATE: {
4536 struct kvm_mp_state mp_state;
4537
4538 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4539 if (r)
4540 goto out;
4541 r = -EFAULT;
4542 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4543 goto out;
4544 r = 0;
4545 break;
4546 }
4547 case KVM_SET_MP_STATE: {
4548 struct kvm_mp_state mp_state;
4549
4550 r = -EFAULT;
4551 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4552 goto out;
4553 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4554 break;
4555 }
4556 case KVM_TRANSLATE: {
4557 struct kvm_translation tr;
4558
4559 r = -EFAULT;
4560 if (copy_from_user(&tr, argp, sizeof(tr)))
4561 goto out;
4562 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4563 if (r)
4564 goto out;
4565 r = -EFAULT;
4566 if (copy_to_user(argp, &tr, sizeof(tr)))
4567 goto out;
4568 r = 0;
4569 break;
4570 }
4571 case KVM_SET_GUEST_DEBUG: {
4572 struct kvm_guest_debug dbg;
4573
4574 r = -EFAULT;
4575 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4576 goto out;
4577 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4578 break;
4579 }
4580 case KVM_SET_SIGNAL_MASK: {
4581 struct kvm_signal_mask __user *sigmask_arg = argp;
4582 struct kvm_signal_mask kvm_sigmask;
4583 sigset_t sigset, *p;
4584
4585 p = NULL;
4586 if (argp) {
4587 r = -EFAULT;
4588 if (copy_from_user(&kvm_sigmask, argp,
4589 sizeof(kvm_sigmask)))
4590 goto out;
4591 r = -EINVAL;
4592 if (kvm_sigmask.len != sizeof(sigset))
4593 goto out;
4594 r = -EFAULT;
4595 if (copy_from_user(&sigset, sigmask_arg->sigset,
4596 sizeof(sigset)))
4597 goto out;
4598 p = &sigset;
4599 }
4600 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4601 break;
4602 }
4603 case KVM_GET_FPU: {
4604 fpu = kzalloc_obj(struct kvm_fpu);
4605 r = -ENOMEM;
4606 if (!fpu)
4607 goto out;
4608 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4609 if (r)
4610 goto out;
4611 r = -EFAULT;
4612 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4613 goto out;
4614 r = 0;
4615 break;
4616 }
4617 case KVM_SET_FPU: {
4618 fpu = memdup_user(argp, sizeof(*fpu));
4619 if (IS_ERR(fpu)) {
4620 r = PTR_ERR(fpu);
4621 fpu = NULL;
4622 goto out;
4623 }
4624 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4625 break;
4626 }
4627 case KVM_GET_STATS_FD: {
4628 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4629 break;
4630 }
4631 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4632 case KVM_PRE_FAULT_MEMORY: {
4633 struct kvm_pre_fault_memory range;
4634
4635 r = -EFAULT;
4636 if (copy_from_user(&range, argp, sizeof(range)))
4637 break;
4638 r = kvm_vcpu_pre_fault_memory(vcpu, &range);
4639 /* Pass back leftover range. */
4640 if (copy_to_user(argp, &range, sizeof(range)))
4641 r = -EFAULT;
4642 break;
4643 }
4644 #endif
4645 default:
4646 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4647 }
4648 out:
4649 mutex_unlock(&vcpu->mutex);
4650 kfree(fpu);
4651 kfree(kvm_sregs);
4652 return r;
4653 }
4654
4655 #ifdef CONFIG_KVM_COMPAT
4656 static long kvm_vcpu_compat_ioctl(struct file *filp,
4657 unsigned int ioctl, unsigned long arg)
4658 {
4659 struct kvm_vcpu *vcpu = filp->private_data;
4660 void __user *argp = compat_ptr(arg);
4661 int r;
4662
4663 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4664 return -EIO;
4665
4666 switch (ioctl) {
4667 case KVM_SET_SIGNAL_MASK: {
4668 struct kvm_signal_mask __user *sigmask_arg = argp;
4669 struct kvm_signal_mask kvm_sigmask;
4670 sigset_t sigset;
4671
4672 if (argp) {
4673 r = -EFAULT;
4674 if (copy_from_user(&kvm_sigmask, argp,
4675 sizeof(kvm_sigmask)))
4676 goto out;
4677 r = -EINVAL;
4678 if (kvm_sigmask.len != sizeof(compat_sigset_t))
4679 goto out;
4680 r = -EFAULT;
4681 if (get_compat_sigset(&sigset,
4682 (compat_sigset_t __user *)sigmask_arg->sigset))
4683 goto out;
4684 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4685 } else
4686 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4687 break;
4688 }
4689 default:
4690 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4691 }
4692
4693 out:
4694 return r;
4695 }
4696 #endif
4697
4698 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4699 {
4700 struct kvm_device *dev = filp->private_data;
4701
4702 if (dev->ops->mmap)
4703 return dev->ops->mmap(dev, vma);
4704
4705 return -ENODEV;
4706 }
4707
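/*
 * Copy a struct kvm_device_attr from userspace and hand it to the device's
 * accessor (get/set/has_attr); -EPERM means the device doesn't implement
 * the requested accessor.
 */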
4708 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4709 int (*accessor)(struct kvm_device *dev,
4710 struct kvm_device_attr *attr),
4711 unsigned long arg)
4712 {
4713 struct kvm_device_attr attr;
4714
4715 if (!accessor)
4716 return -EPERM;
4717
4718 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4719 return -EFAULT;
4720
4721 return accessor(dev, &attr);
4722 }
4723
4724 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4725 unsigned long arg)
4726 {
4727 struct kvm_device *dev = filp->private_data;
4728
4729 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4730 return -EIO;
4731
4732 switch (ioctl) {
4733 case KVM_SET_DEVICE_ATTR:
4734 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4735 case KVM_GET_DEVICE_ATTR:
4736 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4737 case KVM_HAS_DEVICE_ATTR:
4738 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4739 default:
4740 if (dev->ops->ioctl)
4741 return dev->ops->ioctl(dev, ioctl, arg);
4742
4743 return -ENOTTY;
4744 }
4745 }
4746
4747 static int kvm_device_release(struct inode *inode, struct file *filp)
4748 {
4749 struct kvm_device *dev = filp->private_data;
4750 struct kvm *kvm = dev->kvm;
4751
4752 if (dev->ops->release) {
4753 mutex_lock(&kvm->lock);
4754 list_del_rcu(&dev->vm_node);
4755 synchronize_rcu();
4756 dev->ops->release(dev);
4757 mutex_unlock(&kvm->lock);
4758 }
4759
4760 kvm_put_kvm(kvm);
4761 return 0;
4762 }
4763
4764 static struct file_operations kvm_device_fops = {
4765 .unlocked_ioctl = kvm_device_ioctl,
4766 .release = kvm_device_release,
4767 KVM_COMPAT(kvm_device_ioctl),
4768 .mmap = kvm_device_mmap,
4769 };
4770
4771 struct kvm_device *kvm_device_from_filp(struct file *filp)
4772 {
4773 if (filp->f_op != &kvm_device_fops)
4774 return NULL;
4775
4776 return filp->private_data;
4777 }
4778
4779 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4780 #ifdef CONFIG_KVM_MPIC
4781 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4782 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4783 #endif
4784 };
4785
4786 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4787 {
4788 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4789 return -ENOSPC;
4790
4791 if (kvm_device_ops_table[type] != NULL)
4792 return -EEXIST;
4793
4794 kvm_device_ops_table[type] = ops;
4795 return 0;
4796 }
4797
4798 void kvm_unregister_device_ops(u32 type)
4799 {
4800 if (kvm_device_ops_table[type] != NULL)
4801 kvm_device_ops_table[type] = NULL;
4802 }
4803
4804 static int kvm_ioctl_create_device(struct kvm *kvm,
4805 struct kvm_create_device *cd)
4806 {
4807 const struct kvm_device_ops *ops;
4808 struct kvm_device *dev;
4809 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4810 int type;
4811 int ret;
4812
4813 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4814 return -ENODEV;
4815
4816 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4817 ops = kvm_device_ops_table[type];
4818 if (ops == NULL)
4819 return -ENODEV;
4820
4821 if (test)
4822 return 0;
4823
4824 dev = kzalloc_obj(*dev, GFP_KERNEL_ACCOUNT);
4825 if (!dev)
4826 return -ENOMEM;
4827
4828 dev->ops = ops;
4829 dev->kvm = kvm;
4830
4831 mutex_lock(&kvm->lock);
4832 ret = ops->create(dev, type);
4833 if (ret < 0) {
4834 mutex_unlock(&kvm->lock);
4835 kfree(dev);
4836 return ret;
4837 }
4838 list_add_rcu(&dev->vm_node, &kvm->devices);
4839 mutex_unlock(&kvm->lock);
4840
4841 if (ops->init)
4842 ops->init(dev);
4843
4844 kvm_get_kvm(kvm);
4845 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4846 if (ret < 0) {
4847 kvm_put_kvm_no_destroy(kvm);
4848 mutex_lock(&kvm->lock);
4849 list_del_rcu(&dev->vm_node);
4850 synchronize_rcu();
4851 if (ops->release)
4852 ops->release(dev);
4853 mutex_unlock(&kvm->lock);
4854 if (ops->destroy)
4855 ops->destroy(dev);
4856 return ret;
4857 }
4858
4859 cd->fd = ret;
4860 return 0;
4861 }
4862
4863 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4864 {
4865 switch (arg) {
4866 case KVM_CAP_SYNC_MMU:
4867 case KVM_CAP_USER_MEMORY:
4868 case KVM_CAP_USER_MEMORY2:
4869 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4870 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4871 case KVM_CAP_INTERNAL_ERROR_DATA:
4872 #ifdef CONFIG_HAVE_KVM_MSI
4873 case KVM_CAP_SIGNAL_MSI:
4874 #endif
4875 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4876 case KVM_CAP_IRQFD:
4877 #endif
4878 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4879 case KVM_CAP_CHECK_EXTENSION_VM:
4880 case KVM_CAP_ENABLE_CAP_VM:
4881 case KVM_CAP_HALT_POLL:
4882 return 1;
4883 #ifdef CONFIG_KVM_MMIO
4884 case KVM_CAP_COALESCED_MMIO:
4885 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4886 case KVM_CAP_COALESCED_PIO:
4887 return 1;
4888 #endif
4889 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4890 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4891 return KVM_DIRTY_LOG_MANUAL_CAPS;
4892 #endif
4893 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4894 case KVM_CAP_IRQ_ROUTING:
4895 return KVM_MAX_IRQ_ROUTES;
4896 #endif
4897 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4898 case KVM_CAP_MULTI_ADDRESS_SPACE:
4899 if (kvm)
4900 return kvm_arch_nr_memslot_as_ids(kvm);
4901 return KVM_MAX_NR_ADDRESS_SPACES;
4902 #endif
4903 case KVM_CAP_NR_MEMSLOTS:
4904 return KVM_USER_MEM_SLOTS;
4905 case KVM_CAP_DIRTY_LOG_RING:
4906 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4907 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4908 #else
4909 return 0;
4910 #endif
4911 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4912 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4913 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4914 #else
4915 return 0;
4916 #endif
4917 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4918 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4919 #endif
4920 case KVM_CAP_BINARY_STATS_FD:
4921 case KVM_CAP_SYSTEM_EVENT_DATA:
4922 case KVM_CAP_DEVICE_CTRL:
4923 return 1;
4924 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4925 case KVM_CAP_MEMORY_ATTRIBUTES:
4926 return kvm_supported_mem_attributes(kvm);
4927 #endif
4928 #ifdef CONFIG_KVM_GUEST_MEMFD
4929 case KVM_CAP_GUEST_MEMFD:
4930 return 1;
4931 case KVM_CAP_GUEST_MEMFD_FLAGS:
4932 return kvm_gmem_get_supported_flags(kvm);
4933 #endif
4934 default:
4935 break;
4936 }
4937 return kvm_vm_ioctl_check_extension(kvm, arg);
4938 }
4939
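/*
 * Validate and record the dirty ring size: it must be a power of two, at
 * least a page, large enough for the reserved entries, no larger than
 * KVM_DIRTY_RING_MAX_ENTRIES, and can only be set once, before any vCPU is
 * created.
 */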
4940 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4941 {
4942 int r;
4943
4944 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4945 return -EINVAL;
4946
4947 /* the size should be a power of 2 */
4948 if (!size || (size & (size - 1)))
4949 return -EINVAL;
4950
4951 /* Must be large enough to hold the reserved entries, and at least a page */
4952 if (size < kvm_dirty_ring_get_rsvd_entries(kvm) *
4953 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4954 return -EINVAL;
4955
4956 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4957 sizeof(struct kvm_dirty_gfn))
4958 return -E2BIG;
4959
4960 /* We only allow it to be set once */
4961 if (kvm->dirty_ring_size)
4962 return -EINVAL;
4963
4964 mutex_lock(&kvm->lock);
4965
4966 if (kvm->created_vcpus) {
4967 /* We don't allow changing this value after vCPUs have been created */
4968 r = -EINVAL;
4969 } else {
4970 kvm->dirty_ring_size = size;
4971 r = 0;
4972 }
4973
4974 mutex_unlock(&kvm->lock);
4975 return r;
4976 }
4977
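/*
 * Reset the dirty rings of all vCPUs: collect the already-harvested entries,
 * flush remote TLBs if anything was reset, and return the number of entries
 * cleared.
 */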
4978 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4979 {
4980 unsigned long i;
4981 struct kvm_vcpu *vcpu;
4982 int cleared = 0, r;
4983
4984 if (!kvm->dirty_ring_size)
4985 return -EINVAL;
4986
4987 mutex_lock(&kvm->slots_lock);
4988
4989 kvm_for_each_vcpu(i, vcpu, kvm) {
4990 r = kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring, &cleared);
4991 if (r)
4992 break;
4993 }
4994
4995 mutex_unlock(&kvm->slots_lock);
4996
4997 if (cleared)
4998 kvm_flush_remote_tlbs(kvm);
4999
5000 return cleared;
5001 }
5002
5003 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
5004 struct kvm_enable_cap *cap)
5005 {
5006 return -EINVAL;
5007 }
5008
5009 bool kvm_are_all_memslots_empty(struct kvm *kvm)
5010 {
5011 int i;
5012
5013 lockdep_assert_held(&kvm->slots_lock);
5014
5015 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
5016 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
5017 return false;
5018 }
5019
5020 return true;
5021 }
5022 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_are_all_memslots_empty);
5023
5024 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
5025 struct kvm_enable_cap *cap)
5026 {
5027 switch (cap->cap) {
5028 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5029 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
5030 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
5031
5032 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
5033 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
5034
5035 if (cap->flags || (cap->args[0] & ~allowed_options))
5036 return -EINVAL;
5037 kvm->manual_dirty_log_protect = cap->args[0];
5038 return 0;
5039 }
5040 #endif
5041 case KVM_CAP_HALT_POLL: {
5042 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5043 return -EINVAL;
5044
5045 kvm->max_halt_poll_ns = cap->args[0];
5046
5047 /*
5048 * Ensure kvm->override_halt_poll_ns does not become visible
5049 * before kvm->max_halt_poll_ns.
5050 *
5051 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5052 */
5053 smp_wmb();
5054 kvm->override_halt_poll_ns = true;
5055
5056 return 0;
5057 }
5058 case KVM_CAP_DIRTY_LOG_RING:
5059 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5060 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5061 return -EINVAL;
5062
5063 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5064 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5065 int r = -EINVAL;
5066
5067 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5068 !kvm->dirty_ring_size || cap->flags)
5069 return r;
5070
5071 mutex_lock(&kvm->slots_lock);
5072
5073 /*
5074 * For simplicity, allow enabling ring+bitmap if and only if
5075 * there are no memslots, e.g. to ensure all memslots allocate
5076 * a bitmap after the capability is enabled.
5077 */
5078 if (kvm_are_all_memslots_empty(kvm)) {
5079 kvm->dirty_ring_with_bitmap = true;
5080 r = 0;
5081 }
5082
5083 mutex_unlock(&kvm->slots_lock);
5084
5085 return r;
5086 }
5087 default:
5088 return kvm_vm_ioctl_enable_cap(kvm, cap);
5089 }
5090 }
5091
5092 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5093 size_t size, loff_t *offset)
5094 {
5095 struct kvm *kvm = file->private_data;
5096
5097 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5098 &kvm_vm_stats_desc[0], &kvm->stat,
5099 sizeof(kvm->stat), user_buffer, size, offset);
5100 }
5101
5102 static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5103 {
5104 struct kvm *kvm = file->private_data;
5105
5106 kvm_put_kvm(kvm);
5107 return 0;
5108 }
5109
5110 static const struct file_operations kvm_vm_stats_fops = {
5111 .owner = THIS_MODULE,
5112 .read = kvm_vm_stats_read,
5113 .release = kvm_vm_stats_release,
5114 .llseek = noop_llseek,
5115 };
5116
5117 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5118 {
5119 int fd;
5120 struct file *file;
5121
5122 fd = get_unused_fd_flags(O_CLOEXEC);
5123 if (fd < 0)
5124 return fd;
5125
5126 file = anon_inode_getfile_fmode("kvm-vm-stats",
5127 &kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD);
5128 if (IS_ERR(file)) {
5129 put_unused_fd(fd);
5130 return PTR_ERR(file);
5131 }
5132
5133 kvm_get_kvm(kvm);
5134 fd_install(fd, file);
5135
5136 return fd;
5137 }
5138
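/*
 * Assert at build time that @field has the same offset and size in
 * struct kvm_userspace_memory_region and kvm_userspace_memory_region2, so
 * the legacy ioctl can be serviced with the newer struct.
 */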
5139 #define SANITY_CHECK_MEM_REGION_FIELD(field) \
5140 do { \
5141 BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \
5142 offsetof(struct kvm_userspace_memory_region2, field)); \
5143 BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
5144 sizeof_field(struct kvm_userspace_memory_region2, field)); \
5145 } while (0)
5146
5147 static long kvm_vm_ioctl(struct file *filp,
5148 unsigned int ioctl, unsigned long arg)
5149 {
5150 struct kvm *kvm = filp->private_data;
5151 void __user *argp = (void __user *)arg;
5152 int r;
5153
5154 if (kvm->mm != current->mm || kvm->vm_dead)
5155 return -EIO;
5156 switch (ioctl) {
5157 case KVM_CREATE_VCPU:
5158 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5159 break;
5160 case KVM_ENABLE_CAP: {
5161 struct kvm_enable_cap cap;
5162
5163 r = -EFAULT;
5164 if (copy_from_user(&cap, argp, sizeof(cap)))
5165 goto out;
5166 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5167 break;
5168 }
5169 case KVM_SET_USER_MEMORY_REGION2:
5170 case KVM_SET_USER_MEMORY_REGION: {
5171 struct kvm_userspace_memory_region2 mem;
5172 unsigned long size;
5173
5174 if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5175 /*
5176 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5177 * accessed, but avoid leaking kernel memory in case of a bug.
5178 */
5179 memset(&mem, 0, sizeof(mem));
5180 size = sizeof(struct kvm_userspace_memory_region);
5181 } else {
5182 size = sizeof(struct kvm_userspace_memory_region2);
5183 }
5184
5185 /* Ensure the common parts of the two structs are identical. */
5186 SANITY_CHECK_MEM_REGION_FIELD(slot);
5187 SANITY_CHECK_MEM_REGION_FIELD(flags);
5188 SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5189 SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5190 SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5191
5192 r = -EFAULT;
5193 if (copy_from_user(&mem, argp, size))
5194 goto out;
5195
5196 r = -EINVAL;
5197 if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5198 (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5199 goto out;
5200
5201 r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5202 break;
5203 }
5204 case KVM_GET_DIRTY_LOG: {
5205 struct kvm_dirty_log log;
5206
5207 r = -EFAULT;
5208 if (copy_from_user(&log, argp, sizeof(log)))
5209 goto out;
5210 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5211 break;
5212 }
5213 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5214 case KVM_CLEAR_DIRTY_LOG: {
5215 struct kvm_clear_dirty_log log;
5216
5217 r = -EFAULT;
5218 if (copy_from_user(&log, argp, sizeof(log)))
5219 goto out;
5220 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5221 break;
5222 }
5223 #endif
5224 #ifdef CONFIG_KVM_MMIO
5225 case KVM_REGISTER_COALESCED_MMIO: {
5226 struct kvm_coalesced_mmio_zone zone;
5227
5228 r = -EFAULT;
5229 if (copy_from_user(&zone, argp, sizeof(zone)))
5230 goto out;
5231 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5232 break;
5233 }
5234 case KVM_UNREGISTER_COALESCED_MMIO: {
5235 struct kvm_coalesced_mmio_zone zone;
5236
5237 r = -EFAULT;
5238 if (copy_from_user(&zone, argp, sizeof(zone)))
5239 goto out;
5240 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5241 break;
5242 }
5243 #endif
5244 case KVM_IRQFD: {
5245 struct kvm_irqfd data;
5246
5247 r = -EFAULT;
5248 if (copy_from_user(&data, argp, sizeof(data)))
5249 goto out;
5250 r = kvm_irqfd(kvm, &data);
5251 break;
5252 }
5253 case KVM_IOEVENTFD: {
5254 struct kvm_ioeventfd data;
5255
5256 r = -EFAULT;
5257 if (copy_from_user(&data, argp, sizeof(data)))
5258 goto out;
5259 r = kvm_ioeventfd(kvm, &data);
5260 break;
5261 }
5262 #ifdef CONFIG_HAVE_KVM_MSI
5263 case KVM_SIGNAL_MSI: {
5264 struct kvm_msi msi;
5265
5266 r = -EFAULT;
5267 if (copy_from_user(&msi, argp, sizeof(msi)))
5268 goto out;
5269 r = kvm_send_userspace_msi(kvm, &msi);
5270 break;
5271 }
5272 #endif
5273 #ifdef __KVM_HAVE_IRQ_LINE
5274 case KVM_IRQ_LINE_STATUS:
5275 case KVM_IRQ_LINE: {
5276 struct kvm_irq_level irq_event;
5277
5278 r = -EFAULT;
5279 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5280 goto out;
5281
5282 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5283 ioctl == KVM_IRQ_LINE_STATUS);
5284 if (r)
5285 goto out;
5286
5287 r = -EFAULT;
5288 if (ioctl == KVM_IRQ_LINE_STATUS) {
5289 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5290 goto out;
5291 }
5292
5293 r = 0;
5294 break;
5295 }
5296 #endif
5297 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5298 case KVM_SET_GSI_ROUTING: {
5299 struct kvm_irq_routing routing;
5300 struct kvm_irq_routing __user *urouting;
5301 struct kvm_irq_routing_entry *entries = NULL;
5302
5303 r = -EFAULT;
5304 if (copy_from_user(&routing, argp, sizeof(routing)))
5305 goto out;
5306 r = -EINVAL;
5307 if (!kvm_arch_can_set_irq_routing(kvm))
5308 goto out;
5309 if (routing.nr > KVM_MAX_IRQ_ROUTES)
5310 goto out;
5311 if (routing.flags)
5312 goto out;
5313 if (routing.nr) {
5314 urouting = argp;
5315 entries = vmemdup_array_user(urouting->entries,
5316 routing.nr, sizeof(*entries));
5317 if (IS_ERR(entries)) {
5318 r = PTR_ERR(entries);
5319 goto out;
5320 }
5321 }
5322 r = kvm_set_irq_routing(kvm, entries, routing.nr,
5323 routing.flags);
5324 kvfree(entries);
5325 break;
5326 }
5327 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5328 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5329 case KVM_SET_MEMORY_ATTRIBUTES: {
5330 struct kvm_memory_attributes attrs;
5331
5332 r = -EFAULT;
5333 if (copy_from_user(&attrs, argp, sizeof(attrs)))
5334 goto out;
5335
5336 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5337 break;
5338 }
5339 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5340 case KVM_CREATE_DEVICE: {
5341 struct kvm_create_device cd;
5342
5343 r = -EFAULT;
5344 if (copy_from_user(&cd, argp, sizeof(cd)))
5345 goto out;
5346
5347 r = kvm_ioctl_create_device(kvm, &cd);
5348 if (r)
5349 goto out;
5350
5351 r = -EFAULT;
5352 if (copy_to_user(argp, &cd, sizeof(cd)))
5353 goto out;
5354
5355 r = 0;
5356 break;
5357 }
5358 case KVM_CHECK_EXTENSION:
5359 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5360 break;
5361 case KVM_RESET_DIRTY_RINGS:
5362 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5363 break;
5364 case KVM_GET_STATS_FD:
5365 r = kvm_vm_ioctl_get_stats_fd(kvm);
5366 break;
5367 #ifdef CONFIG_KVM_GUEST_MEMFD
5368 case KVM_CREATE_GUEST_MEMFD: {
5369 struct kvm_create_guest_memfd guest_memfd;
5370
5371 r = -EFAULT;
5372 if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5373 goto out;
5374
5375 r = kvm_gmem_create(kvm, &guest_memfd);
5376 break;
5377 }
5378 #endif
5379 default:
5380 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5381 }
5382 out:
5383 return r;
5384 }
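/*
 * Illustrative userspace sketch (not part of KVM): creating a memslot with
 * the legacy KVM_SET_USER_MEMORY_REGION ioctl handled above.  The backing
 * memory is an ordinary anonymous mmap(); the slot number and guest physical
 * address below are arbitrary example values, and add_slot0 is hypothetical.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/kvm.h>
 *
 *	static int add_slot0(int vm_fd, size_t size)
 *	{
 *		void *host_mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		struct kvm_userspace_memory_region region = {
 *			.slot		 = 0,
 *			.flags		 = 0,
 *			.guest_phys_addr = 0,
 *			.memory_size	 = size,
 *			.userspace_addr	 = (unsigned long)host_mem,
 *		};
 *
 *		if (host_mem == MAP_FAILED)
 *			return -1;
 *		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *	}
 */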
5385
5386 #ifdef CONFIG_KVM_COMPAT
5387 struct compat_kvm_dirty_log {
5388 __u32 slot;
5389 __u32 padding1;
5390 union {
5391 compat_uptr_t dirty_bitmap; /* one bit per page */
5392 __u64 padding2;
5393 };
5394 };
5395
5396 struct compat_kvm_clear_dirty_log {
5397 __u32 slot;
5398 __u32 num_pages;
5399 __u64 first_page;
5400 union {
5401 compat_uptr_t dirty_bitmap; /* one bit per page */
5402 __u64 padding2;
5403 };
5404 };
5405
5406 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5407 unsigned long arg)
5408 {
5409 return -ENOTTY;
5410 }
5411
5412 static long kvm_vm_compat_ioctl(struct file *filp,
5413 unsigned int ioctl, unsigned long arg)
5414 {
5415 struct kvm *kvm = filp->private_data;
5416 int r;
5417
5418 if (kvm->mm != current->mm || kvm->vm_dead)
5419 return -EIO;
5420
5421 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5422 if (r != -ENOTTY)
5423 return r;
5424
5425 switch (ioctl) {
5426 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5427 case KVM_CLEAR_DIRTY_LOG: {
5428 struct compat_kvm_clear_dirty_log compat_log;
5429 struct kvm_clear_dirty_log log;
5430
5431 if (copy_from_user(&compat_log, (void __user *)arg,
5432 sizeof(compat_log)))
5433 return -EFAULT;
5434 log.slot = compat_log.slot;
5435 log.num_pages = compat_log.num_pages;
5436 log.first_page = compat_log.first_page;
5437 log.padding2 = compat_log.padding2;
5438 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5439
5440 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5441 break;
5442 }
5443 #endif
5444 case KVM_GET_DIRTY_LOG: {
5445 struct compat_kvm_dirty_log compat_log;
5446 struct kvm_dirty_log log;
5447
5448 if (copy_from_user(&compat_log, (void __user *)arg,
5449 sizeof(compat_log)))
5450 return -EFAULT;
5451 log.slot = compat_log.slot;
5452 log.padding1 = compat_log.padding1;
5453 log.padding2 = compat_log.padding2;
5454 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5455
5456 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5457 break;
5458 }
5459 default:
5460 r = kvm_vm_ioctl(filp, ioctl, arg);
5461 }
5462 return r;
5463 }
5464 #endif
5465
5466 static struct file_operations kvm_vm_fops = {
5467 .release = kvm_vm_release,
5468 .unlocked_ioctl = kvm_vm_ioctl,
5469 .llseek = noop_llseek,
5470 KVM_COMPAT(kvm_vm_compat_ioctl),
5471 };
5472
5473 bool file_is_kvm(struct file *file)
5474 {
5475 return file && file->f_op == &kvm_vm_fops;
5476 }
5477 EXPORT_SYMBOL_FOR_KVM_INTERNAL(file_is_kvm);
5478
5479 static int kvm_dev_ioctl_create_vm(unsigned long type)
5480 {
5481 char fdname[ITOA_MAX_LEN + 1];
5482 int r, fd;
5483 struct kvm *kvm;
5484 struct file *file;
5485
5486 fd = get_unused_fd_flags(O_CLOEXEC);
5487 if (fd < 0)
5488 return fd;
5489
5490 snprintf(fdname, sizeof(fdname), "%d", fd);
5491
5492 kvm = kvm_create_vm(type, fdname);
5493 if (IS_ERR(kvm)) {
5494 r = PTR_ERR(kvm);
5495 goto put_fd;
5496 }
5497
5498 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5499 if (IS_ERR(file)) {
5500 r = PTR_ERR(file);
5501 goto put_kvm;
5502 }
5503
5504 /*
5505 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5506 * already set, with ->release() being kvm_vm_release(). In error
5507 * cases it will be called by the final fput(file) and will take
5508 * care of doing kvm_put_kvm(kvm).
5509 */
5510 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5511
5512 fd_install(fd, file);
5513 return fd;
5514
5515 put_kvm:
5516 kvm_put_kvm(kvm);
5517 put_fd:
5518 put_unused_fd(fd);
5519 return r;
5520 }
5521
5522 static long kvm_dev_ioctl(struct file *filp,
5523 unsigned int ioctl, unsigned long arg)
5524 {
5525 int r = -EINVAL;
5526
5527 switch (ioctl) {
5528 case KVM_GET_API_VERSION:
5529 if (arg)
5530 goto out;
5531 r = KVM_API_VERSION;
5532 break;
5533 case KVM_CREATE_VM:
5534 r = kvm_dev_ioctl_create_vm(arg);
5535 break;
5536 case KVM_CHECK_EXTENSION:
5537 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5538 break;
5539 case KVM_GET_VCPU_MMAP_SIZE:
5540 if (arg)
5541 goto out;
5542 r = PAGE_SIZE; /* struct kvm_run */
5543 #ifdef CONFIG_X86
5544 r += PAGE_SIZE; /* pio data page */
5545 #endif
5546 #ifdef CONFIG_KVM_MMIO
5547 r += PAGE_SIZE; /* coalesced mmio ring page */
5548 #endif
5549 break;
5550 default:
5551 return kvm_arch_dev_ioctl(filp, ioctl, arg);
5552 }
5553 out:
5554 return r;
5555 }
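/*
 * Illustrative userspace sketch (not part of KVM): the /dev/kvm handshake
 * served by kvm_dev_ioctl() above, passing machine type 0 (the default VM
 * type on most architectures) to KVM_CREATE_VM.  The function name create_vm
 * is hypothetical; a VMM is expected to bail out if the API version does not
 * match KVM_API_VERSION.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int create_vm(void)
 *	{
 *		int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *
 *		if (kvm_fd < 0)
 *			return -1;
 *		if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *			return -1;
 *		return ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	}
 */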
5556
5557 static struct file_operations kvm_chardev_ops = {
5558 .unlocked_ioctl = kvm_dev_ioctl,
5559 .llseek = noop_llseek,
5560 KVM_COMPAT(kvm_dev_ioctl),
5561 };
5562
5563 static struct miscdevice kvm_dev = {
5564 KVM_MINOR,
5565 "kvm",
5566 &kvm_chardev_ops,
5567 };
5568
5569 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5570 bool __ro_after_init enable_virt_at_load = true;
5571 module_param(enable_virt_at_load, bool, 0444);
5572 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
5573
5574 static DEFINE_PER_CPU(bool, virtualization_enabled);
5575 static DEFINE_MUTEX(kvm_usage_lock);
5576 static int kvm_usage_count;
5577
5578 __weak void kvm_arch_shutdown(void)
5579 {
5580
5581 }
5582
5583 __weak void kvm_arch_enable_virtualization(void)
5584 {
5585
5586 }
5587
5588 __weak void kvm_arch_disable_virtualization(void)
5589 {
5590
5591 }
5592
5593 static int kvm_enable_virtualization_cpu(void)
5594 {
5595 if (__this_cpu_read(virtualization_enabled))
5596 return 0;
5597
5598 if (kvm_arch_enable_virtualization_cpu()) {
5599 pr_info("kvm: enabling virtualization on CPU%d failed\n",
5600 raw_smp_processor_id());
5601 return -EIO;
5602 }
5603
5604 __this_cpu_write(virtualization_enabled, true);
5605 return 0;
5606 }
5607
5608 static int kvm_online_cpu(unsigned int cpu)
5609 {
5610 /*
5611 * Abort the CPU online process if hardware virtualization cannot
5612 * be enabled. Otherwise running VMs would encounter unrecoverable
5613 * errors when scheduled to this CPU.
5614 */
5615 return kvm_enable_virtualization_cpu();
5616 }
5617
5618 static void kvm_disable_virtualization_cpu(void *ign)
5619 {
5620 if (!__this_cpu_read(virtualization_enabled))
5621 return;
5622
5623 kvm_arch_disable_virtualization_cpu();
5624
5625 __this_cpu_write(virtualization_enabled, false);
5626 }
5627
5628 static int kvm_offline_cpu(unsigned int cpu)
5629 {
5630 kvm_disable_virtualization_cpu(NULL);
5631 return 0;
5632 }
5633
5634 static void kvm_shutdown(void *data)
5635 {
5636 kvm_arch_shutdown();
5637
5638 /*
5639 * Some flavors of hardware virtualization need to be disabled before
5640 * transferring control to firmware (to perform shutdown/reboot), e.g.
5641 * on x86, virtualization can block INIT interrupts, which are used by
5642 * firmware to pull APs back under firmware control. Note, this path
5643 * is used for both shutdown and reboot scenarios, i.e. neither name is
5644 * 100% comprehensive.
5645 */
5646 pr_info("kvm: exiting hardware virtualization\n");
5647 on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
5648 }
5649
5650 static int kvm_suspend(void *data)
5651 {
5652 /*
5653 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5654 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
5655 * count is stable. Assert that kvm_usage_lock is not held to ensure
5656 * the system isn't suspended while KVM is enabling hardware. Hardware
5657 * enabling can be preempted, but the task cannot be frozen until it has
5658 * dropped all locks (userspace tasks are frozen via a fake signal).
5659 */
5660 lockdep_assert_not_held(&kvm_usage_lock);
5661 lockdep_assert_irqs_disabled();
5662
5663 kvm_disable_virtualization_cpu(NULL);
5664 return 0;
5665 }
5666
5667 static void kvm_resume(void *data)
5668 {
5669 lockdep_assert_not_held(&kvm_usage_lock);
5670 lockdep_assert_irqs_disabled();
5671
5672 WARN_ON_ONCE(kvm_enable_virtualization_cpu());
5673 }
5674
5675 static const struct syscore_ops kvm_syscore_ops = {
5676 .suspend = kvm_suspend,
5677 .resume = kvm_resume,
5678 .shutdown = kvm_shutdown,
5679 };
5680
5681 static struct syscore kvm_syscore = {
5682 .ops = &kvm_syscore_ops,
5683 };
5684
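/*
 * Virtualization enabling is reference counted: the first successful call to
 * kvm_enable_virtualization() sets up the CPU hotplug state and the syscore
 * hooks, and every successful call must eventually be balanced by a call to
 * kvm_disable_virtualization(), which tears both down again once the usage
 * count drops to zero.
 */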
5685 static int kvm_enable_virtualization(void)
5686 {
5687 int r;
5688
5689 guard(mutex)(&kvm_usage_lock);
5690
5691 if (kvm_usage_count++)
5692 return 0;
5693
5694 kvm_arch_enable_virtualization();
5695
5696 r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
5697 kvm_online_cpu, kvm_offline_cpu);
5698 if (r)
5699 goto err_cpuhp;
5700
5701 register_syscore(&kvm_syscore);
5702
5703 /*
5704 * Undo virtualization enabling and bail if the system is going down.
5705 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5706 * possible for an in-flight operation to enable virtualization after
5707 * syscore_shutdown() is called, i.e. without kvm_shutdown() being
5708 * invoked. Note, this relies on system_state being set _before_
5709 * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
5710 * or this CPU observes the impending shutdown. Which is why KVM uses
5711 * a syscore ops hook instead of registering a dedicated reboot
5712 * notifier (the latter runs before system_state is updated).
5713 */
5714 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5715 system_state == SYSTEM_RESTART) {
5716 r = -EBUSY;
5717 goto err_rebooting;
5718 }
5719
5720 return 0;
5721
5722 err_rebooting:
5723 unregister_syscore(&kvm_syscore);
5724 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5725 err_cpuhp:
5726 kvm_arch_disable_virtualization();
5727 --kvm_usage_count;
5728 return r;
5729 }
5730
5731 static void kvm_disable_virtualization(void)
5732 {
5733 guard(mutex)(&kvm_usage_lock);
5734
5735 if (--kvm_usage_count)
5736 return;
5737
5738 unregister_syscore(&kvm_syscore);
5739 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5740 kvm_arch_disable_virtualization();
5741 }
5742
5743 static int kvm_init_virtualization(void)
5744 {
5745 if (enable_virt_at_load)
5746 return kvm_enable_virtualization();
5747
5748 return 0;
5749 }
5750
5751 static void kvm_uninit_virtualization(void)
5752 {
5753 if (enable_virt_at_load)
5754 kvm_disable_virtualization();
5755 }
5756 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5757 static int kvm_enable_virtualization(void)
5758 {
5759 return 0;
5760 }
5761 static void kvm_disable_virtualization(void)
5762 {
5763
5764 }
5765 static int kvm_init_virtualization(void)
5766 {
5767 return 0;
5768 }
5769
5770 static void kvm_uninit_virtualization(void)
5771 {
5772
5773 }
5774 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5775
5776 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5777 {
5778 if (dev->ops->destructor)
5779 dev->ops->destructor(dev);
5780 }
5781
5782 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5783 {
5784 int i;
5785
5786 for (i = 0; i < bus->dev_count; i++) {
5787 struct kvm_io_device *pos = bus->range[i].dev;
5788
5789 kvm_iodevice_destructor(pos);
5790 }
5791 kfree(bus);
5792 }
5793
5794 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5795 const struct kvm_io_range *r2)
5796 {
5797 gpa_t addr1 = r1->addr;
5798 gpa_t addr2 = r2->addr;
5799
5800 if (addr1 < addr2)
5801 return -1;
5802
5803 /* If r2->len == 0, match the exact address. If r2->len != 0,
5804 * accept any overlapping write. Any order is acceptable for
5805 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5806 * we process all of them.
5807 */
5808 if (r2->len) {
5809 addr1 += r1->len;
5810 addr2 += r2->len;
5811 }
5812
5813 if (addr1 > addr2)
5814 return 1;
5815
5816 return 0;
5817 }
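/*
 * Concrete example of the comparison above: a registered range
 * { .addr = 0x1000, .len = 4 } matches an access key { .addr = 0x1002,
 * .len = 2 } because the key's start (0x1002) is not below the registered
 * start (0x1000) and its length-adjusted end (0x1004) does not exceed the
 * registered end (0x1004), so the function returns 0.  A registered
 * zero-length range, by contrast, only matches an access that starts at
 * exactly its address.
 */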
5818
5819 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5820 {
5821 return kvm_io_bus_cmp(p1, p2);
5822 }
5823
5824 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5825 gpa_t addr, int len)
5826 {
5827 struct kvm_io_range *range, key;
5828 int off;
5829
5830 key = (struct kvm_io_range) {
5831 .addr = addr,
5832 .len = len,
5833 };
5834
5835 range = bsearch(&key, bus->range, bus->dev_count,
5836 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5837 if (range == NULL)
5838 return -ENOENT;
5839
5840 off = range - bus->range;
5841
5842 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5843 off--;
5844
5845 return off;
5846 }
5847
5848 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5849 struct kvm_io_range *range, const void *val)
5850 {
5851 int idx;
5852
5853 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5854 if (idx < 0)
5855 return -EOPNOTSUPP;
5856
5857 while (idx < bus->dev_count &&
5858 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5859 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5860 range->len, val))
5861 return idx;
5862 idx++;
5863 }
5864
5865 return -EOPNOTSUPP;
5866 }
5867
5868 static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
5869 {
5870 /*
5871 * Ensure that any updates to kvm_buses[] observed by the previous vCPU
5872 * machine instruction are also visible to the vCPU machine instruction
5873 * that triggered this call.
5874 */
5875 smp_mb__after_srcu_read_lock();
5876
5877 return srcu_dereference(kvm->buses[idx], &kvm->srcu);
5878 }
5879
5880 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5881 int len, const void *val)
5882 {
5883 struct kvm_io_bus *bus;
5884 struct kvm_io_range range;
5885 int r;
5886
5887 range = (struct kvm_io_range) {
5888 .addr = addr,
5889 .len = len,
5890 };
5891
5892 bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5893 if (!bus)
5894 return -ENOMEM;
5895 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5896 return r < 0 ? r : 0;
5897 }
5898 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_write);
5899
5900 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5901 gpa_t addr, int len, const void *val, long cookie)
5902 {
5903 struct kvm_io_bus *bus;
5904 struct kvm_io_range range;
5905
5906 range = (struct kvm_io_range) {
5907 .addr = addr,
5908 .len = len,
5909 };
5910
5911 bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5912 if (!bus)
5913 return -ENOMEM;
5914
5915 /* First try the device referenced by cookie. */
5916 if ((cookie >= 0) && (cookie < bus->dev_count) &&
5917 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5918 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5919 val))
5920 return cookie;
5921
5922 /*
5923 * cookie contained garbage; fall back to search and return the
5924 * correct cookie value.
5925 */
5926 return __kvm_io_bus_write(vcpu, bus, &range, val);
5927 }
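/*
 * Usage note: the value returned above (and by __kvm_io_bus_write()) is the
 * matching device's index on the bus.  A caller that repeatedly hits the same
 * range can cache that index and pass it back as @cookie so the fast path is
 * a single array lookup instead of a bsearch; a stale or bogus cookie is
 * harmless and simply falls back to the full search.
 */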
5928
5929 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5930 struct kvm_io_range *range, void *val)
5931 {
5932 int idx;
5933
5934 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5935 if (idx < 0)
5936 return -EOPNOTSUPP;
5937
5938 while (idx < bus->dev_count &&
5939 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5940 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5941 range->len, val))
5942 return idx;
5943 idx++;
5944 }
5945
5946 return -EOPNOTSUPP;
5947 }
5948
5949 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5950 int len, void *val)
5951 {
5952 struct kvm_io_bus *bus;
5953 struct kvm_io_range range;
5954 int r;
5955
5956 range = (struct kvm_io_range) {
5957 .addr = addr,
5958 .len = len,
5959 };
5960
5961 bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5962 if (!bus)
5963 return -ENOMEM;
5964 r = __kvm_io_bus_read(vcpu, bus, &range, val);
5965 return r < 0 ? r : 0;
5966 }
5967 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_read);
5968
5969 static void __free_bus(struct rcu_head *rcu)
5970 {
5971 struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu);
5972
5973 kfree(bus);
5974 }
5975
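/*
 * Bus updates below follow a copy-and-publish pattern: a new kvm_io_bus is
 * allocated, the old contents are copied around the insertion (or deletion)
 * point, the new bus is published with rcu_assign_pointer(), and the old one
 * is only freed after an SRCU grace period so that concurrent readers keep
 * dereferencing a consistent snapshot.
 */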
5976 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5977 int len, struct kvm_io_device *dev)
5978 {
5979 int i;
5980 struct kvm_io_bus *new_bus, *bus;
5981 struct kvm_io_range range;
5982
5983 lockdep_assert_held(&kvm->slots_lock);
5984
5985 bus = kvm_get_bus(kvm, bus_idx);
5986 if (!bus)
5987 return -ENOMEM;
5988
5989 /* exclude ioeventfd which is limited by maximum fd */
5990 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5991 return -ENOSPC;
5992
5993 new_bus = kmalloc_flex(*bus, range, bus->dev_count + 1,
5994 GFP_KERNEL_ACCOUNT);
5995 if (!new_bus)
5996 return -ENOMEM;
5997
5998 range = (struct kvm_io_range) {
5999 .addr = addr,
6000 .len = len,
6001 .dev = dev,
6002 };
6003
6004 for (i = 0; i < bus->dev_count; i++)
6005 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
6006 break;
6007
6008 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
6009 new_bus->dev_count++;
6010 new_bus->range[i] = range;
6011 memcpy(new_bus->range + i + 1, bus->range + i,
6012 (bus->dev_count - i) * sizeof(struct kvm_io_range));
6013 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6014 call_srcu(&kvm->srcu, &bus->rcu, __free_bus);
6015
6016 return 0;
6017 }
6018
6019 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6020 struct kvm_io_device *dev)
6021 {
6022 int i;
6023 struct kvm_io_bus *new_bus, *bus;
6024
6025 lockdep_assert_held(&kvm->slots_lock);
6026
6027 bus = kvm_get_bus(kvm, bus_idx);
6028 if (!bus)
6029 return 0;
6030
6031 for (i = 0; i < bus->dev_count; i++) {
6032 if (bus->range[i].dev == dev) {
6033 break;
6034 }
6035 }
6036
6037 if (i == bus->dev_count)
6038 return 0;
6039
6040 new_bus = kmalloc_flex(*bus, range, bus->dev_count - 1,
6041 GFP_KERNEL_ACCOUNT);
6042 if (new_bus) {
6043 memcpy(new_bus, bus, struct_size(bus, range, i));
6044 new_bus->dev_count--;
6045 memcpy(new_bus->range + i, bus->range + i + 1,
6046 flex_array_size(new_bus, range, new_bus->dev_count - i));
6047 }
6048
6049 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6050 synchronize_srcu_expedited(&kvm->srcu);
6051
6052 /*
6053 * If NULL bus is installed, destroy the old bus, including all the
6054 * attached devices. Otherwise, destroy the caller's device only.
6055 */
6056 if (!new_bus) {
6057 pr_err("kvm: failed to shrink bus, removing it completely\n");
6058 kvm_io_bus_destroy(bus);
6059 return -ENOMEM;
6060 }
6061
6062 kvm_iodevice_destructor(dev);
6063 kfree(bus);
6064 return 0;
6065 }
6066
6067 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6068 gpa_t addr)
6069 {
6070 struct kvm_io_bus *bus;
6071 int dev_idx, srcu_idx;
6072 struct kvm_io_device *iodev = NULL;
6073
6074 srcu_idx = srcu_read_lock(&kvm->srcu);
6075
6076 bus = kvm_get_bus_srcu(kvm, bus_idx);
6077 if (!bus)
6078 goto out_unlock;
6079
6080 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6081 if (dev_idx < 0)
6082 goto out_unlock;
6083
6084 iodev = bus->range[dev_idx].dev;
6085
6086 out_unlock:
6087 srcu_read_unlock(&kvm->srcu, srcu_idx);
6088
6089 return iodev;
6090 }
6091 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_get_dev);
6092
6093 static int kvm_debugfs_open(struct inode *inode, struct file *file,
6094 int (*get)(void *, u64 *), int (*set)(void *, u64),
6095 const char *fmt)
6096 {
6097 int ret;
6098 struct kvm_stat_data *stat_data = inode->i_private;
6099
6100 /*
6101 * The debugfs files are a reference to the kvm struct which
6102 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
6103 * avoids the race between open and the removal of the debugfs directory.
6104 */
6105 if (!kvm_get_kvm_safe(stat_data->kvm))
6106 return -ENOENT;
6107
6108 ret = simple_attr_open(inode, file, get,
6109 kvm_stats_debugfs_mode(stat_data->desc) & 0222
6110 ? set : NULL, fmt);
6111 if (ret)
6112 kvm_put_kvm(stat_data->kvm);
6113
6114 return ret;
6115 }
6116
6117 static int kvm_debugfs_release(struct inode *inode, struct file *file)
6118 {
6119 struct kvm_stat_data *stat_data = inode->i_private;
6120
6121 simple_attr_release(inode, file);
6122 kvm_put_kvm(stat_data->kvm);
6123
6124 return 0;
6125 }
6126
6127 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6128 {
6129 *val = *(u64 *)((void *)(&kvm->stat) + offset);
6130
6131 return 0;
6132 }
6133
6134 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6135 {
6136 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
6137
6138 return 0;
6139 }
6140
6141 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6142 {
6143 unsigned long i;
6144 struct kvm_vcpu *vcpu;
6145
6146 *val = 0;
6147
6148 kvm_for_each_vcpu(i, vcpu, kvm)
6149 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
6150
6151 return 0;
6152 }
6153
6154 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6155 {
6156 unsigned long i;
6157 struct kvm_vcpu *vcpu;
6158
6159 kvm_for_each_vcpu(i, vcpu, kvm)
6160 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6161
6162 return 0;
6163 }
6164
6165 static int kvm_stat_data_get(void *data, u64 *val)
6166 {
6167 int r = -EFAULT;
6168 struct kvm_stat_data *stat_data = data;
6169
6170 switch (stat_data->kind) {
6171 case KVM_STAT_VM:
6172 r = kvm_get_stat_per_vm(stat_data->kvm,
6173 stat_data->desc->offset, val);
6174 break;
6175 case KVM_STAT_VCPU:
6176 r = kvm_get_stat_per_vcpu(stat_data->kvm,
6177 stat_data->desc->offset, val);
6178 break;
6179 }
6180
6181 return r;
6182 }
6183
6184 static int kvm_stat_data_clear(void *data, u64 val)
6185 {
6186 int r = -EFAULT;
6187 struct kvm_stat_data *stat_data = data;
6188
6189 if (val)
6190 return -EINVAL;
6191
6192 switch (stat_data->kind) {
6193 case KVM_STAT_VM:
6194 r = kvm_clear_stat_per_vm(stat_data->kvm,
6195 stat_data->desc->offset);
6196 break;
6197 case KVM_STAT_VCPU:
6198 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6199 stat_data->desc->offset);
6200 break;
6201 }
6202
6203 return r;
6204 }
6205
6206 static int kvm_stat_data_open(struct inode *inode, struct file *file)
6207 {
6208 __simple_attr_check_format("%llu\n", 0ull);
6209 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6210 kvm_stat_data_clear, "%llu\n");
6211 }
6212
6213 static const struct file_operations stat_fops_per_vm = {
6214 .owner = THIS_MODULE,
6215 .open = kvm_stat_data_open,
6216 .release = kvm_debugfs_release,
6217 .read = simple_attr_read,
6218 .write = simple_attr_write,
6219 };
6220
6221 static int vm_stat_get(void *_offset, u64 *val)
6222 {
6223 unsigned offset = (long)_offset;
6224 struct kvm *kvm;
6225 u64 tmp_val;
6226
6227 *val = 0;
6228 mutex_lock(&kvm_lock);
6229 list_for_each_entry(kvm, &vm_list, vm_list) {
6230 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6231 *val += tmp_val;
6232 }
6233 mutex_unlock(&kvm_lock);
6234 return 0;
6235 }
6236
6237 static int vm_stat_clear(void *_offset, u64 val)
6238 {
6239 unsigned offset = (long)_offset;
6240 struct kvm *kvm;
6241
6242 if (val)
6243 return -EINVAL;
6244
6245 mutex_lock(&kvm_lock);
6246 list_for_each_entry(kvm, &vm_list, vm_list) {
6247 kvm_clear_stat_per_vm(kvm, offset);
6248 }
6249 mutex_unlock(&kvm_lock);
6250
6251 return 0;
6252 }
6253
6254 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6255 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6256
6257 static int vcpu_stat_get(void *_offset, u64 *val)
6258 {
6259 unsigned offset = (long)_offset;
6260 struct kvm *kvm;
6261 u64 tmp_val;
6262
6263 *val = 0;
6264 mutex_lock(&kvm_lock);
6265 list_for_each_entry(kvm, &vm_list, vm_list) {
6266 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6267 *val += tmp_val;
6268 }
6269 mutex_unlock(&kvm_lock);
6270 return 0;
6271 }
6272
6273 static int vcpu_stat_clear(void *_offset, u64 val)
6274 {
6275 unsigned offset = (long)_offset;
6276 struct kvm *kvm;
6277
6278 if (val)
6279 return -EINVAL;
6280
6281 mutex_lock(&kvm_lock);
6282 list_for_each_entry(kvm, &vm_list, vm_list) {
6283 kvm_clear_stat_per_vcpu(kvm, offset);
6284 }
6285 mutex_unlock(&kvm_lock);
6286
6287 return 0;
6288 }
6289
6290 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6291 "%llu\n");
6292 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6293
6294 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6295 {
6296 struct kobj_uevent_env *env;
6297 unsigned long long created, active;
6298
6299 if (!kvm_dev.this_device || !kvm)
6300 return;
6301
6302 mutex_lock(&kvm_lock);
6303 if (type == KVM_EVENT_CREATE_VM) {
6304 kvm_createvm_count++;
6305 kvm_active_vms++;
6306 } else if (type == KVM_EVENT_DESTROY_VM) {
6307 kvm_active_vms--;
6308 }
6309 created = kvm_createvm_count;
6310 active = kvm_active_vms;
6311 mutex_unlock(&kvm_lock);
6312
6313 env = kzalloc_obj(*env);
6314 if (!env)
6315 return;
6316
6317 add_uevent_var(env, "CREATED=%llu", created);
6318 add_uevent_var(env, "COUNT=%llu", active);
6319
6320 if (type == KVM_EVENT_CREATE_VM) {
6321 add_uevent_var(env, "EVENT=create");
6322 kvm->userspace_pid = task_pid_nr(current);
6323 } else if (type == KVM_EVENT_DESTROY_VM) {
6324 add_uevent_var(env, "EVENT=destroy");
6325 }
6326 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6327
6328 if (!IS_ERR(kvm->debugfs_dentry)) {
6329 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
6330
6331 if (p) {
6332 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6333 if (!IS_ERR(tmp))
6334 add_uevent_var(env, "STATS_PATH=%s", tmp);
6335 kfree(p);
6336 }
6337 }
6338 /* no need for checks, since we are adding at most only 5 keys */
6339 env->envp[env->envp_idx++] = NULL;
6340 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6341 kfree(env);
6342 }
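/*
 * For reference, a KVM_EVENT_CREATE_VM notification built above carries an
 * environment along these lines (values illustrative; STATS_PATH is only
 * present when the per-VM debugfs dentry exists):
 *
 *	CREATED=42
 *	COUNT=3
 *	EVENT=create
 *	PID=1234
 *	STATS_PATH=/kvm/1234-11
 */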
6343
6344 static void kvm_init_debug(void)
6345 {
6346 const struct file_operations *fops;
6347 const struct kvm_stats_desc *pdesc;
6348 int i;
6349
6350 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6351
6352 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6353 pdesc = &kvm_vm_stats_desc[i];
6354 if (kvm_stats_debugfs_mode(pdesc) & 0222)
6355 fops = &vm_stat_fops;
6356 else
6357 fops = &vm_stat_readonly_fops;
6358 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6359 kvm_debugfs_dir,
6360 (void *)(long)pdesc->offset, fops);
6361 }
6362
6363 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6364 pdesc = &kvm_vcpu_stats_desc[i];
6365 if (kvm_stats_debugfs_mode(pdesc) & 0222)
6366 fops = &vcpu_stat_fops;
6367 else
6368 fops = &vcpu_stat_readonly_fops;
6369 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6370 kvm_debugfs_dir,
6371 (void *)(long)pdesc->offset, fops);
6372 }
6373 }
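/*
 * Resulting layout (sketch): every descriptor becomes a file directly under
 * /sys/kernel/debug/kvm/, e.g. /sys/kernel/debug/kvm/remote_tlb_flush, with
 * VM stats backed by vm_stat_fops and vCPU stats by vcpu_stat_fops.  Files
 * whose mode includes a write bit accept "0" to clear the aggregated counter
 * across all VMs via the *_clear() helpers above.
 */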
6374
6375 static inline
6376 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6377 {
6378 return container_of(pn, struct kvm_vcpu, preempt_notifier);
6379 }
6380
6381 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6382 {
6383 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6384
6385 WRITE_ONCE(vcpu->preempted, false);
6386 WRITE_ONCE(vcpu->ready, false);
6387
6388 __this_cpu_write(kvm_running_vcpu, vcpu);
6389 kvm_arch_vcpu_load(vcpu, cpu);
6390
6391 WRITE_ONCE(vcpu->scheduled_out, false);
6392 }
6393
6394 static void kvm_sched_out(struct preempt_notifier *pn,
6395 struct task_struct *next)
6396 {
6397 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6398
6399 WRITE_ONCE(vcpu->scheduled_out, true);
6400
6401 if (task_is_runnable(current) && vcpu->wants_to_run) {
6402 WRITE_ONCE(vcpu->preempted, true);
6403 WRITE_ONCE(vcpu->ready, true);
6404 }
6405 kvm_arch_vcpu_put(vcpu);
6406 __this_cpu_write(kvm_running_vcpu, NULL);
6407 }
6408
6409 /**
6410 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6411 *
6412 * We can disable preemption locally around accessing the per-CPU variable,
6413 * and use the resolved vcpu pointer after enabling preemption again,
6414 * because even if the current thread is migrated to another CPU, reading
6415 * the per-CPU value later will give us the same value as we update the
6416 * per-CPU variable in the preempt notifier handlers.
6417 */
6418 struct kvm_vcpu *kvm_get_running_vcpu(void)
6419 {
6420 struct kvm_vcpu *vcpu;
6421
6422 preempt_disable();
6423 vcpu = __this_cpu_read(kvm_running_vcpu);
6424 preempt_enable();
6425
6426 return vcpu;
6427 }
6428 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_running_vcpu);
6429
6430 /**
6431 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6432 */
6433 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6434 {
6435 return &kvm_running_vcpu;
6436 }
6437
6438 #ifdef CONFIG_GUEST_PERF_EVENTS
6439 static unsigned int kvm_guest_state(void)
6440 {
6441 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6442 unsigned int state;
6443
6444 if (!kvm_arch_pmi_in_guest(vcpu))
6445 return 0;
6446
6447 state = PERF_GUEST_ACTIVE;
6448 if (!kvm_arch_vcpu_in_kernel(vcpu))
6449 state |= PERF_GUEST_USER;
6450
6451 return state;
6452 }
6453
6454 static unsigned long kvm_guest_get_ip(void)
6455 {
6456 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6457
6458 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6459 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6460 return 0;
6461
6462 return kvm_arch_vcpu_get_ip(vcpu);
6463 }
6464
6465 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6466 .state = kvm_guest_state,
6467 .get_ip = kvm_guest_get_ip,
6468 .handle_intel_pt_intr = NULL,
6469 .handle_mediated_pmi = NULL,
6470 };
6471
6472 void __kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void),
6473 void (*mediated_pmi_handler)(void))
6474 {
6475 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6476 kvm_guest_cbs.handle_mediated_pmi = mediated_pmi_handler;
6477
6478 perf_register_guest_info_callbacks(&kvm_guest_cbs);
6479 }
6480 void kvm_unregister_perf_callbacks(void)
6481 {
6482 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6483 }
6484 #endif
6485
6486 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6487 {
6488 int r;
6489 int cpu;
6490
6491 /* A kmem cache lets us meet the alignment requirements of fx_save. */
6492 if (!vcpu_align)
6493 vcpu_align = __alignof__(struct kvm_vcpu);
6494 kvm_vcpu_cache =
6495 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6496 SLAB_ACCOUNT,
6497 offsetof(struct kvm_vcpu, arch),
6498 offsetofend(struct kvm_vcpu, stats_id)
6499 - offsetof(struct kvm_vcpu, arch),
6500 NULL);
6501 if (!kvm_vcpu_cache)
6502 return -ENOMEM;
6503
6504 for_each_possible_cpu(cpu) {
6505 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6506 GFP_KERNEL, cpu_to_node(cpu))) {
6507 r = -ENOMEM;
6508 goto err_cpu_kick_mask;
6509 }
6510 }
6511
6512 r = kvm_irqfd_init();
6513 if (r)
6514 goto err_irqfd;
6515
6516 r = kvm_async_pf_init();
6517 if (r)
6518 goto err_async_pf;
6519
6520 kvm_chardev_ops.owner = module;
6521 kvm_vm_fops.owner = module;
6522 kvm_vcpu_fops.owner = module;
6523 kvm_device_fops.owner = module;
6524
6525 kvm_preempt_ops.sched_in = kvm_sched_in;
6526 kvm_preempt_ops.sched_out = kvm_sched_out;
6527
6528 kvm_init_debug();
6529
6530 r = kvm_vfio_ops_init();
6531 if (WARN_ON_ONCE(r))
6532 goto err_vfio;
6533
6534 r = kvm_gmem_init(module);
6535 if (r)
6536 goto err_gmem;
6537
6538 r = kvm_init_virtualization();
6539 if (r)
6540 goto err_virt;
6541
6542 /*
6543 * Registration _must_ be the very last thing done, as this exposes
6544 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
6545 */
6546 r = misc_register(&kvm_dev);
6547 if (r) {
6548 pr_err("kvm: misc device register failed\n");
6549 goto err_register;
6550 }
6551
6552 return 0;
6553
6554 err_register:
6555 kvm_uninit_virtualization();
6556 err_virt:
6557 kvm_gmem_exit();
6558 err_gmem:
6559 kvm_vfio_ops_exit();
6560 err_vfio:
6561 kvm_async_pf_deinit();
6562 err_async_pf:
6563 kvm_irqfd_exit();
6564 err_irqfd:
6565 err_cpu_kick_mask:
6566 for_each_possible_cpu(cpu)
6567 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6568 kmem_cache_destroy(kvm_vcpu_cache);
6569 return r;
6570 }
6571 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init);
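/*
 * Illustrative arch-module sketch (hypothetical type and function names):
 * architecture code sizes the vcpu slab for its own containing structure and
 * calls kvm_init() from module_init(), pairing it with kvm_exit() on unload.
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		return kvm_init(sizeof(struct my_arch_vcpu),
 *				__alignof__(struct my_arch_vcpu), THIS_MODULE);
 *	}
 *	module_init(my_arch_kvm_init);
 *
 *	static void __exit my_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *	module_exit(my_arch_kvm_exit);
 */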
6572
6573 void kvm_exit(void)
6574 {
6575 int cpu;
6576
6577 /*
6578 * Note, unregistering /dev/kvm doesn't strictly need to come first,
6579 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
6580 * to KVM while the module is being stopped.
6581 */
6582 misc_deregister(&kvm_dev);
6583
6584 kvm_uninit_virtualization();
6585
6586 debugfs_remove_recursive(kvm_debugfs_dir);
6587 for_each_possible_cpu(cpu)
6588 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6589 kmem_cache_destroy(kvm_vcpu_cache);
6590 kvm_gmem_exit();
6591 kvm_vfio_ops_exit();
6592 kvm_async_pf_deinit();
6593 kvm_irqfd_exit();
6594 }
6595 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_exit);
6596