1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine (KVM) Hypervisor
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
7 *
8 * Authors:
9 * Avi Kivity <avi@qumranet.com>
10 * Yaniv Kamay <yaniv@qumranet.com>
11 */
12
13 #include <kvm/iodev.h>
14
15 #include <linux/kvm_host.h>
16 #include <linux/kvm.h>
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/percpu.h>
20 #include <linux/mm.h>
21 #include <linux/miscdevice.h>
22 #include <linux/vmalloc.h>
23 #include <linux/reboot.h>
24 #include <linux/debugfs.h>
25 #include <linux/highmem.h>
26 #include <linux/file.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/cpu.h>
29 #include <linux/sched/signal.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/stat.h>
32 #include <linux/cpumask.h>
33 #include <linux/smp.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/profile.h>
36 #include <linux/kvm_para.h>
37 #include <linux/pagemap.h>
38 #include <linux/mman.h>
39 #include <linux/swap.h>
40 #include <linux/bitops.h>
41 #include <linux/spinlock.h>
42 #include <linux/compat.h>
43 #include <linux/srcu.h>
44 #include <linux/hugetlb.h>
45 #include <linux/slab.h>
46 #include <linux/sort.h>
47 #include <linux/bsearch.h>
48 #include <linux/io.h>
49 #include <linux/lockdep.h>
50 #include <linux/kthread.h>
51 #include <linux/suspend.h>
52
53 #include <asm/processor.h>
54 #include <asm/ioctl.h>
55 #include <linux/uaccess.h>
56
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59 #include "kvm_mm.h"
60 #include "vfio.h"
61
62 #include <trace/events/ipi.h>
63
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/kvm.h>
66
67 #include <linux/kvm_dirty_ring.h>
68
69
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72
73 MODULE_AUTHOR("Qumranet");
74 MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
75 MODULE_LICENSE("GPL");
76
77 /* Architectures should define their poll value according to the halt latency */
78 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
79 module_param(halt_poll_ns, uint, 0644);
80 EXPORT_SYMBOL_GPL(halt_poll_ns);
81
82 /* The default grow factor doubles the per-vCPU halt_poll_ns. */
83 unsigned int halt_poll_ns_grow = 2;
84 module_param(halt_poll_ns_grow, uint, 0644);
85 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
86
87 /* The start value to grow halt_poll_ns from */
88 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
89 module_param(halt_poll_ns_grow_start, uint, 0644);
90 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
91
92 /* The default shrink factor halves the per-vCPU halt_poll_ns. */
93 unsigned int halt_poll_ns_shrink = 2;
94 module_param(halt_poll_ns_shrink, uint, 0644);
95 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
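/*
 * Illustrative sketch (not the authoritative logic, which lives in
 * grow_halt_poll_ns()/shrink_halt_poll_ns() later in this file) of how the
 * knobs above are applied to a vCPU's polling window:
 *
 *	grown  = max(vcpu->halt_poll_ns * halt_poll_ns_grow,
 *		     halt_poll_ns_grow_start);
 *	shrunk = halt_poll_ns_shrink ?
 *		 vcpu->halt_poll_ns / halt_poll_ns_shrink : 0;
 *
 * i.e. the window grows multiplicatively starting from grow_start and shrinks
 * by division; a shrink divisor of 0 disables polling entirely on shrink.
 */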
96
97 /*
98 * Ordering of locks:
99 *
100 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
101 */
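/*
 * For example, a path that needs all three of the above would nest them as
 * (illustrative only):
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	mutex_lock(&kvm->irq_lock);
 *	...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 *
 * Acquiring the locks in any other order risks an ABBA deadlock.
 */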
102
103 DEFINE_MUTEX(kvm_lock);
104 LIST_HEAD(vm_list);
105
106 static struct kmem_cache *kvm_vcpu_cache;
107
108 static __read_mostly struct preempt_ops kvm_preempt_ops;
109 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
110
111 static struct dentry *kvm_debugfs_dir;
112
113 static const struct file_operations stat_fops_per_vm;
114
115 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
116 unsigned long arg);
117 #ifdef CONFIG_KVM_COMPAT
118 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
119 unsigned long arg);
120 #define KVM_COMPAT(c) .compat_ioctl = (c)
121 #else
122 /*
123 * For architectures that don't implement a compat infrastructure,
124 * adopt a double line of defense:
125 * - Prevent a compat task from opening /dev/kvm
126 * - If the open has been done by a 64-bit task, and the KVM fd
127 * passed to a compat task, let the ioctls fail.
128 */
129 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
130 unsigned long arg) { return -EINVAL; }
131
132 static int kvm_no_compat_open(struct inode *inode, struct file *file)
133 {
134 return is_compat_task() ? -ENODEV : 0;
135 }
136 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
137 .open = kvm_no_compat_open
138 #endif
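/*
 * Illustrative use of KVM_COMPAT(): the macro is dropped into the various
 * file_operations definitions later in this file so that each fops either
 * gets a real .compat_ioctl handler or the defensive stubs above, e.g.
 * (a sketch, not the exact definition):
 *
 *	static const struct file_operations kvm_vcpu_fops = {
 *		.unlocked_ioctl	= kvm_vcpu_ioctl,
 *		.release	= kvm_vcpu_release,
 *		KVM_COMPAT(kvm_vcpu_compat_ioctl),
 *	};
 */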
139 static int hardware_enable_all(void);
140 static void hardware_disable_all(void);
141
142 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
143
144 #define KVM_EVENT_CREATE_VM 0
145 #define KVM_EVENT_DESTROY_VM 1
146 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
147 static unsigned long long kvm_createvm_count;
148 static unsigned long long kvm_active_vms;
149
150 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
151
152 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
153 {
154 }
155
156 bool kvm_is_zone_device_page(struct page *page)
157 {
158 /*
159 * The metadata used by is_zone_device_page() to determine whether or
160 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
161 * the device has been pinned, e.g. by get_user_pages(). WARN if the
162 * page_count() is zero to help detect bad usage of this helper.
163 */
164 if (WARN_ON_ONCE(!page_count(page)))
165 return false;
166
167 return is_zone_device_page(page);
168 }
169
170 /*
171 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
172 * page, NULL otherwise. Note, the list of refcounted PG_reserved page types
173 * is likely incomplete; it has been compiled purely through people wanting to
174 * back guests with a certain type of memory and encountering issues.
175 */
176 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
177 {
178 struct page *page;
179
180 if (!pfn_valid(pfn))
181 return NULL;
182
183 page = pfn_to_page(pfn);
184 if (!PageReserved(page))
185 return page;
186
187 /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
188 if (is_zero_pfn(pfn))
189 return page;
190
191 /*
192 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
193 * perspective they are "normal" pages, albeit with slightly different
194 * usage rules.
195 */
196 if (kvm_is_zone_device_page(page))
197 return page;
198
199 return NULL;
200 }
201
202 /*
203 * Switches to the specified vcpu, until a matching vcpu_put()
204 */
205 void vcpu_load(struct kvm_vcpu *vcpu)
206 {
207 int cpu = get_cpu();
208
209 __this_cpu_write(kvm_running_vcpu, vcpu);
210 preempt_notifier_register(&vcpu->preempt_notifier);
211 kvm_arch_vcpu_load(vcpu, cpu);
212 put_cpu();
213 }
214 EXPORT_SYMBOL_GPL(vcpu_load);
215
216 void vcpu_put(struct kvm_vcpu *vcpu)
217 {
218 preempt_disable();
219 kvm_arch_vcpu_put(vcpu);
220 preempt_notifier_unregister(&vcpu->preempt_notifier);
221 __this_cpu_write(kvm_running_vcpu, NULL);
222 preempt_enable();
223 }
224 EXPORT_SYMBOL_GPL(vcpu_put);
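/*
 * vcpu_load()/vcpu_put() are always used as a bracketing pair around code that
 * needs the vCPU's state loaded on the current physical CPU, e.g. a typical
 * ioctl path (illustrative sketch; do_locked_work() is a placeholder):
 *
 *	vcpu_load(vcpu);
 *	r = do_locked_work(vcpu);
 *	vcpu_put(vcpu);
 *
 * Preemption is only disabled inside the helpers themselves; the caller may
 * sleep in between, and the registered preempt notifier reloads the state
 * after any preemption in that window.
 */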
225
226 /* TODO: merge with kvm_arch_vcpu_should_kick */
227 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
228 {
229 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
230
231 /*
232 * We need to wait for the VCPU to reenable interrupts and get out of
233 * READING_SHADOW_PAGE_TABLES mode.
234 */
235 if (req & KVM_REQUEST_WAIT)
236 return mode != OUTSIDE_GUEST_MODE;
237
238 /*
239 * Need to kick a running VCPU, but otherwise there is nothing to do.
240 */
241 return mode == IN_GUEST_MODE;
242 }
243
244 static void ack_kick(void *_completed)
245 {
246 }
247
248 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
249 {
250 if (cpumask_empty(cpus))
251 return false;
252
253 smp_call_function_many(cpus, ack_kick, NULL, wait);
254 return true;
255 }
256
257 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
258 struct cpumask *tmp, int current_cpu)
259 {
260 int cpu;
261
262 if (likely(!(req & KVM_REQUEST_NO_ACTION)))
263 __kvm_make_request(req, vcpu);
264
265 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
266 return;
267
268 /*
269 * Note, the vCPU could get migrated to a different pCPU at any point
270 * after kvm_request_needs_ipi(), which could result in sending an IPI
271 * to the previous pCPU. But, that's OK because the purpose of the IPI
272 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
273 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
274 * after this point is also OK, as the requirement is only that KVM wait
275 * for vCPUs that were reading SPTEs _before_ any changes were
276 * finalized. See kvm_vcpu_kick() for more details on handling requests.
277 */
278 if (kvm_request_needs_ipi(vcpu, req)) {
279 cpu = READ_ONCE(vcpu->cpu);
280 if (cpu != -1 && cpu != current_cpu)
281 __cpumask_set_cpu(cpu, tmp);
282 }
283 }
284
285 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
286 unsigned long *vcpu_bitmap)
287 {
288 struct kvm_vcpu *vcpu;
289 struct cpumask *cpus;
290 int i, me;
291 bool called;
292
293 me = get_cpu();
294
295 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
296 cpumask_clear(cpus);
297
298 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
299 vcpu = kvm_get_vcpu(kvm, i);
300 if (!vcpu)
301 continue;
302 kvm_make_vcpu_request(vcpu, req, cpus, me);
303 }
304
305 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
306 put_cpu();
307
308 return called;
309 }
310
311 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
312 {
313 struct kvm_vcpu *vcpu;
314 struct cpumask *cpus;
315 unsigned long i;
316 bool called;
317 int me;
318
319 me = get_cpu();
320
321 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
322 cpumask_clear(cpus);
323
324 kvm_for_each_vcpu(i, vcpu, kvm)
325 kvm_make_vcpu_request(vcpu, req, cpus, me);
326
327 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
328 put_cpu();
329
330 return called;
331 }
332 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
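/*
 * Typical usage (illustrative): force every vCPU to flush its TLB and, because
 * KVM_REQ_TLB_FLUSH carries KVM_REQUEST_WAIT, wait until no vCPU is still
 * running in guest mode with stale translations:
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 * Each targeted vCPU then observes the pending bit via kvm_check_request()
 * before its next guest entry.
 */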
333
334 void kvm_flush_remote_tlbs(struct kvm *kvm)
335 {
336 ++kvm->stat.generic.remote_tlb_flush_requests;
337
338 /*
339 * We want to publish modifications to the page tables before reading
340 * mode. Pairs with a memory barrier in arch-specific code.
341 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
342 * and smp_mb in walk_shadow_page_lockless_begin/end.
343 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
344 *
345 * There is already an smp_mb__after_atomic() before
346 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
347 * barrier here.
348 */
349 if (!kvm_arch_flush_remote_tlbs(kvm)
350 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
351 ++kvm->stat.generic.remote_tlb_flush;
352 }
353 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
354
355 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
356 {
357 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
358 return;
359
360 /*
361 * Fall back to flushing the entire TLB if the architecture's range-based
362 * TLB invalidation is unsupported or can't be performed for whatever
363 * reason.
364 */
365 kvm_flush_remote_tlbs(kvm);
366 }
367
368 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
369 const struct kvm_memory_slot *memslot)
370 {
371 /*
372 * All current use cases for flushing the TLBs for a specific memslot
373 * are related to dirty logging, and many do the TLB flush out of
374 * mmu_lock. The interaction between the various operations on a memslot
375 * must be serialized by slots_lock to ensure the TLB flush from one
376 * operation is observed by any other operation on the same memslot.
377 */
378 lockdep_assert_held(&kvm->slots_lock);
379 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
380 }
381
382 static void kvm_flush_shadow_all(struct kvm *kvm)
383 {
384 kvm_arch_flush_shadow_all(kvm);
385 kvm_arch_guest_memory_reclaimed(kvm);
386 }
387
388 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
389 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
390 gfp_t gfp_flags)
391 {
392 void *page;
393
394 gfp_flags |= mc->gfp_zero;
395
396 if (mc->kmem_cache)
397 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
398
399 page = (void *)__get_free_page(gfp_flags);
400 if (page && mc->init_value)
401 memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
402 return page;
403 }
404
405 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
406 {
407 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
408 void *obj;
409
410 if (mc->nobjs >= min)
411 return 0;
412
413 if (unlikely(!mc->objects)) {
414 if (WARN_ON_ONCE(!capacity))
415 return -EIO;
416
417 /*
418 * Custom init values can be used only for page allocations,
419 * and obviously conflict with __GFP_ZERO.
420 */
421 if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
422 return -EIO;
423
424 mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
425 if (!mc->objects)
426 return -ENOMEM;
427
428 mc->capacity = capacity;
429 }
430
431 /* It is illegal to request a different capacity across topups. */
432 if (WARN_ON_ONCE(mc->capacity != capacity))
433 return -EIO;
434
435 while (mc->nobjs < mc->capacity) {
436 obj = mmu_memory_cache_alloc_obj(mc, gfp);
437 if (!obj)
438 return mc->nobjs >= min ? 0 : -ENOMEM;
439 mc->objects[mc->nobjs++] = obj;
440 }
441 return 0;
442 }
443
444 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
445 {
446 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
447 }
448
449 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
450 {
451 return mc->nobjs;
452 }
453
454 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
455 {
456 while (mc->nobjs) {
457 if (mc->kmem_cache)
458 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
459 else
460 free_page((unsigned long)mc->objects[--mc->nobjs]);
461 }
462
463 kvfree(mc->objects);
464
465 mc->objects = NULL;
466 mc->capacity = 0;
467 }
468
469 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
470 {
471 void *p;
472
473 if (WARN_ON(!mc->nobjs))
474 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
475 else
476 p = mc->objects[--mc->nobjs];
477 BUG_ON(!p);
478 return p;
479 }
480 #endif
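/*
 * Illustrative sketch of the intended usage pattern for the cache helpers
 * above (the real callers live in arch MMU code): top up in a sleepable
 * context, then allocate with the cache guaranteed non-empty while holding
 * mmu_lock, where sleeping is forbidden. The field and constant names below
 * are placeholders.
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, MIN_OBJS);
 *	if (r)
 *		return r;
 *
 *	write_lock(&kvm->mmu_lock);
 *	pt_page = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	...
 *	write_unlock(&kvm->mmu_lock);
 */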
481
482 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
483 {
484 mutex_init(&vcpu->mutex);
485 vcpu->cpu = -1;
486 vcpu->kvm = kvm;
487 vcpu->vcpu_id = id;
488 vcpu->pid = NULL;
489 #ifndef __KVM_HAVE_ARCH_WQP
490 rcuwait_init(&vcpu->wait);
491 #endif
492 kvm_async_pf_vcpu_init(vcpu);
493
494 kvm_vcpu_set_in_spin_loop(vcpu, false);
495 kvm_vcpu_set_dy_eligible(vcpu, false);
496 vcpu->preempted = false;
497 vcpu->ready = false;
498 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
499 vcpu->last_used_slot = NULL;
500
501 /* Fill the stats id string for the vcpu */
502 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
503 task_pid_nr(current), id);
504 }
505
506 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
507 {
508 kvm_arch_vcpu_destroy(vcpu);
509 kvm_dirty_ring_free(&vcpu->dirty_ring);
510
511 /*
512 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
513 * the vcpu->pid pointer, and at destruction time all file descriptors
514 * are already gone.
515 */
516 put_pid(rcu_dereference_protected(vcpu->pid, 1));
517
518 free_page((unsigned long)vcpu->run);
519 kmem_cache_free(kvm_vcpu_cache, vcpu);
520 }
521
522 void kvm_destroy_vcpus(struct kvm *kvm)
523 {
524 unsigned long i;
525 struct kvm_vcpu *vcpu;
526
527 kvm_for_each_vcpu(i, vcpu, kvm) {
528 kvm_vcpu_destroy(vcpu);
529 xa_erase(&kvm->vcpu_array, i);
530 }
531
532 atomic_set(&kvm->online_vcpus, 0);
533 }
534 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
535
536 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
537 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
538 {
539 return container_of(mn, struct kvm, mmu_notifier);
540 }
541
542 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
543
544 typedef void (*on_lock_fn_t)(struct kvm *kvm);
545
546 struct kvm_mmu_notifier_range {
547 /*
548 * 64-bit addresses, as KVM notifiers can operate on host virtual
549 * addresses (unsigned long) and guest physical addresses (64-bit).
550 */
551 u64 start;
552 u64 end;
553 union kvm_mmu_notifier_arg arg;
554 gfn_handler_t handler;
555 on_lock_fn_t on_lock;
556 bool flush_on_ret;
557 bool may_block;
558 };
559
560 /*
561 * The inner-most helper returns a tuple containing the return value from the
562 * arch- and action-specific handler, plus a flag indicating whether or not at
563 * least one memslot was found, i.e. if the handler found guest memory.
564 *
565 * Note, most notifiers are averse to booleans, so even though KVM tracks the
566 * return from arch code as a bool, outer helpers will cast it to an int. :-(
567 */
568 typedef struct kvm_mmu_notifier_return {
569 bool ret;
570 bool found_memslot;
571 } kvm_mn_ret_t;
572
573 /*
574 * Use a dedicated stub instead of NULL to indicate that there is no callback
575 * function/handler. The compiler technically can't guarantee that a real
576 * function will have a non-zero address, and so it will generate code to
577 * check for !NULL, whereas comparing against a stub will be elided at compile
578 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
579 */
580 static void kvm_null_fn(void)
581 {
582
583 }
584 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
585
586 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
587 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
588 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
589 node; \
590 node = interval_tree_iter_next(node, start, last)) \
591
592 static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
593 const struct kvm_mmu_notifier_range *range)
594 {
595 struct kvm_mmu_notifier_return r = {
596 .ret = false,
597 .found_memslot = false,
598 };
599 struct kvm_gfn_range gfn_range;
600 struct kvm_memory_slot *slot;
601 struct kvm_memslots *slots;
602 int i, idx;
603
604 if (WARN_ON_ONCE(range->end <= range->start))
605 return r;
606
607 /* A null handler is allowed if and only if on_lock() is provided. */
608 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
609 IS_KVM_NULL_FN(range->handler)))
610 return r;
611
612 idx = srcu_read_lock(&kvm->srcu);
613
614 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
615 struct interval_tree_node *node;
616
617 slots = __kvm_memslots(kvm, i);
618 kvm_for_each_memslot_in_hva_range(node, slots,
619 range->start, range->end - 1) {
620 unsigned long hva_start, hva_end;
621
622 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
623 hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
624 hva_end = min_t(unsigned long, range->end,
625 slot->userspace_addr + (slot->npages << PAGE_SHIFT));
626
627 /*
628 * To optimize for the likely case where the address
629 * range is covered by zero or one memslots, don't
630 * bother making these conditional (to avoid writes on
631 * the second or later invocation of the handler).
632 */
633 gfn_range.arg = range->arg;
634 gfn_range.may_block = range->may_block;
635
636 /*
637 * {gfn(page) | page intersects with [hva_start, hva_end)} =
638 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
639 */
640 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
641 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
642 gfn_range.slot = slot;
643
644 if (!r.found_memslot) {
645 r.found_memslot = true;
646 KVM_MMU_LOCK(kvm);
647 if (!IS_KVM_NULL_FN(range->on_lock))
648 range->on_lock(kvm);
649
650 if (IS_KVM_NULL_FN(range->handler))
651 goto mmu_unlock;
652 }
653 r.ret |= range->handler(kvm, &gfn_range);
654 }
655 }
656
657 if (range->flush_on_ret && r.ret)
658 kvm_flush_remote_tlbs(kvm);
659
660 mmu_unlock:
661 if (r.found_memslot)
662 KVM_MMU_UNLOCK(kvm);
663
664 srcu_read_unlock(&kvm->srcu, idx);
665
666 return r;
667 }
668
669 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
670 unsigned long start,
671 unsigned long end,
672 gfn_handler_t handler)
673 {
674 struct kvm *kvm = mmu_notifier_to_kvm(mn);
675 const struct kvm_mmu_notifier_range range = {
676 .start = start,
677 .end = end,
678 .handler = handler,
679 .on_lock = (void *)kvm_null_fn,
680 .flush_on_ret = true,
681 .may_block = false,
682 };
683
684 return __kvm_handle_hva_range(kvm, &range).ret;
685 }
686
687 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
688 unsigned long start,
689 unsigned long end,
690 gfn_handler_t handler)
691 {
692 struct kvm *kvm = mmu_notifier_to_kvm(mn);
693 const struct kvm_mmu_notifier_range range = {
694 .start = start,
695 .end = end,
696 .handler = handler,
697 .on_lock = (void *)kvm_null_fn,
698 .flush_on_ret = false,
699 .may_block = false,
700 };
701
702 return __kvm_handle_hva_range(kvm, &range).ret;
703 }
704
705 void kvm_mmu_invalidate_begin(struct kvm *kvm)
706 {
707 lockdep_assert_held_write(&kvm->mmu_lock);
708 /*
709 * The count increase must become visible at unlock time as no
710 * spte can be established without taking the mmu_lock, and the
711 * count is also read inside the mmu_lock critical section.
712 */
713 kvm->mmu_invalidate_in_progress++;
714
715 if (likely(kvm->mmu_invalidate_in_progress == 1)) {
716 kvm->mmu_invalidate_range_start = INVALID_GPA;
717 kvm->mmu_invalidate_range_end = INVALID_GPA;
718 }
719 }
720
721 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
722 {
723 lockdep_assert_held_write(&kvm->mmu_lock);
724
725 WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
726
727 if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
728 kvm->mmu_invalidate_range_start = start;
729 kvm->mmu_invalidate_range_end = end;
730 } else {
731 /*
732 * Fully tracking multiple concurrent ranges has diminishing
733 * returns. Keep things simple and just find the minimal range
734 * which includes the current and new ranges. As there won't be
735 * enough information to subtract a range after its invalidate
736 * completes, any ranges invalidated concurrently will
737 * accumulate and persist until all outstanding invalidates
738 * complete.
739 */
740 kvm->mmu_invalidate_range_start =
741 min(kvm->mmu_invalidate_range_start, start);
742 kvm->mmu_invalidate_range_end =
743 max(kvm->mmu_invalidate_range_end, end);
744 }
745 }
746
747 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
748 {
749 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
750 return kvm_unmap_gfn_range(kvm, range);
751 }
752
753 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
754 const struct mmu_notifier_range *range)
755 {
756 struct kvm *kvm = mmu_notifier_to_kvm(mn);
757 const struct kvm_mmu_notifier_range hva_range = {
758 .start = range->start,
759 .end = range->end,
760 .handler = kvm_mmu_unmap_gfn_range,
761 .on_lock = kvm_mmu_invalidate_begin,
762 .flush_on_ret = true,
763 .may_block = mmu_notifier_range_blockable(range),
764 };
765
766 trace_kvm_unmap_hva_range(range->start, range->end);
767
768 /*
769 * Prevent memslot modification between range_start() and range_end()
770 * so that conditionally locking provides the same result in both
771 * functions. Without that guarantee, the mmu_invalidate_in_progress
772 * adjustments will be imbalanced.
773 *
774 * Pairs with the decrement in range_end().
775 */
776 spin_lock(&kvm->mn_invalidate_lock);
777 kvm->mn_active_invalidate_count++;
778 spin_unlock(&kvm->mn_invalidate_lock);
779
780 /*
781 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
782 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
783 * each cache's lock. There are relatively few caches in existence at
784 * any given time, and the caches themselves can check for hva overlap,
785 * i.e. don't need to rely on memslot overlap checks for performance.
786 * Because this runs without holding mmu_lock, the pfn caches must use
787 * mn_active_invalidate_count (see above) instead of
788 * mmu_invalidate_in_progress.
789 */
790 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
791
792 /*
793 * If one or more memslots were found and thus zapped, notify arch code
794 * that guest memory has been reclaimed. This needs to be done *after*
795 * dropping mmu_lock, as x86's reclaim path is slooooow.
796 */
797 if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
798 kvm_arch_guest_memory_reclaimed(kvm);
799
800 return 0;
801 }
802
803 void kvm_mmu_invalidate_end(struct kvm *kvm)
804 {
805 lockdep_assert_held_write(&kvm->mmu_lock);
806
807 /*
808 * This sequence increase notifies the KVM page fault handler that
809 * the page that is going to be mapped in the spte could have
810 * been freed.
811 */
812 kvm->mmu_invalidate_seq++;
813 smp_wmb();
814 /*
815 * The above sequence increase must be visible before the
816 * below count decrease, which is ensured by the smp_wmb above
817 * in conjunction with the smp_rmb in mmu_invalidate_retry().
818 */
819 kvm->mmu_invalidate_in_progress--;
820 KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
821
822 /*
823 * Assert that at least one range was added between start() and end().
824 * Not adding a range isn't fatal, but it is a KVM bug.
825 */
826 WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
827 }
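/*
 * Illustrative sketch of how the begin()/end() bookkeeping above is consumed
 * by an arch page-fault path via mmu_invalidate_retry(); look_up_pfn() and
 * install_mapping() are placeholders for arch-specific code:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = look_up_pfn(...);			// outside mmu_lock, may sleep
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		goto retry;			// raced with an invalidation
 *	install_mapping(...);
 *	write_unlock(&kvm->mmu_lock);
 */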
828
829 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
830 const struct mmu_notifier_range *range)
831 {
832 struct kvm *kvm = mmu_notifier_to_kvm(mn);
833 const struct kvm_mmu_notifier_range hva_range = {
834 .start = range->start,
835 .end = range->end,
836 .handler = (void *)kvm_null_fn,
837 .on_lock = kvm_mmu_invalidate_end,
838 .flush_on_ret = false,
839 .may_block = mmu_notifier_range_blockable(range),
840 };
841 bool wake;
842
843 __kvm_handle_hva_range(kvm, &hva_range);
844
845 /* Pairs with the increment in range_start(). */
846 spin_lock(&kvm->mn_invalidate_lock);
847 if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
848 --kvm->mn_active_invalidate_count;
849 wake = !kvm->mn_active_invalidate_count;
850 spin_unlock(&kvm->mn_invalidate_lock);
851
852 /*
853 * There can only be one waiter, since the wait happens under
854 * slots_lock.
855 */
856 if (wake)
857 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
858 }
859
860 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
861 struct mm_struct *mm,
862 unsigned long start,
863 unsigned long end)
864 {
865 trace_kvm_age_hva(start, end);
866
867 return kvm_handle_hva_range(mn, start, end, kvm_age_gfn);
868 }
869
870 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
871 struct mm_struct *mm,
872 unsigned long start,
873 unsigned long end)
874 {
875 trace_kvm_age_hva(start, end);
876
877 /*
878 * Even though we do not flush TLB, this will still adversely
879 * affect performance on pre-Haswell Intel EPT, where there is
880 * no EPT Access Bit to clear so that we have to tear down EPT
881 * tables instead. If we find this unacceptable, we can always
882 * add a parameter to kvm_age_hva so that it effectively doesn't
883 * do anything on clear_young.
884 *
885 * Also note that currently we never issue secondary TLB flushes
886 * from clear_young, leaving this job up to the regular system
887 * cadence. If we find this inaccurate, we might come up with a
888 * more sophisticated heuristic later.
889 */
890 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
891 }
892
893 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
894 struct mm_struct *mm,
895 unsigned long address)
896 {
897 trace_kvm_test_age_hva(address);
898
899 return kvm_handle_hva_range_no_flush(mn, address, address + 1,
900 kvm_test_age_gfn);
901 }
902
903 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
904 struct mm_struct *mm)
905 {
906 struct kvm *kvm = mmu_notifier_to_kvm(mn);
907 int idx;
908
909 idx = srcu_read_lock(&kvm->srcu);
910 kvm_flush_shadow_all(kvm);
911 srcu_read_unlock(&kvm->srcu, idx);
912 }
913
914 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
915 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
916 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
917 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
918 .clear_young = kvm_mmu_notifier_clear_young,
919 .test_young = kvm_mmu_notifier_test_young,
920 .release = kvm_mmu_notifier_release,
921 };
922
923 static int kvm_init_mmu_notifier(struct kvm *kvm)
924 {
925 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
926 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
927 }
928
929 #else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
930
931 static int kvm_init_mmu_notifier(struct kvm *kvm)
932 {
933 return 0;
934 }
935
936 #endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
937
938 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
939 static int kvm_pm_notifier_call(struct notifier_block *bl,
940 unsigned long state,
941 void *unused)
942 {
943 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
944
945 return kvm_arch_pm_notifier(kvm, state);
946 }
947
948 static void kvm_init_pm_notifier(struct kvm *kvm)
949 {
950 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
951 /* Suspend KVM before we suspend ftrace, RCU, etc. */
952 kvm->pm_notifier.priority = INT_MAX;
953 register_pm_notifier(&kvm->pm_notifier);
954 }
955
956 static void kvm_destroy_pm_notifier(struct kvm *kvm)
957 {
958 unregister_pm_notifier(&kvm->pm_notifier);
959 }
960 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
961 static void kvm_init_pm_notifier(struct kvm *kvm)
962 {
963 }
964
965 static void kvm_destroy_pm_notifier(struct kvm *kvm)
966 {
967 }
968 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
969
970 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
971 {
972 if (!memslot->dirty_bitmap)
973 return;
974
975 vfree(memslot->dirty_bitmap);
976 memslot->dirty_bitmap = NULL;
977 }
978
979 /* This does not remove the slot from struct kvm_memslots data structures */
980 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
981 {
982 if (slot->flags & KVM_MEM_GUEST_MEMFD)
983 kvm_gmem_unbind(slot);
984
985 kvm_destroy_dirty_bitmap(slot);
986
987 kvm_arch_free_memslot(kvm, slot);
988
989 kfree(slot);
990 }
991
992 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
993 {
994 struct hlist_node *idnode;
995 struct kvm_memory_slot *memslot;
996 int bkt;
997
998 /*
999 * The same memslot objects live in both active and inactive sets;
1000 * arbitrarily free using index '1' so the second invocation of this
1001 * function isn't operating over a structure with dangling pointers
1002 * (even though this function isn't actually touching them).
1003 */
1004 if (!slots->node_idx)
1005 return;
1006
1007 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1008 kvm_free_memslot(kvm, memslot);
1009 }
1010
1011 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
1012 {
1013 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
1014 case KVM_STATS_TYPE_INSTANT:
1015 return 0444;
1016 case KVM_STATS_TYPE_CUMULATIVE:
1017 case KVM_STATS_TYPE_PEAK:
1018 default:
1019 return 0644;
1020 }
1021 }
1022
1023
1024 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1025 {
1026 int i;
1027 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1028 kvm_vcpu_stats_header.num_desc;
1029
1030 if (IS_ERR(kvm->debugfs_dentry))
1031 return;
1032
1033 debugfs_remove_recursive(kvm->debugfs_dentry);
1034
1035 if (kvm->debugfs_stat_data) {
1036 for (i = 0; i < kvm_debugfs_num_entries; i++)
1037 kfree(kvm->debugfs_stat_data[i]);
1038 kfree(kvm->debugfs_stat_data);
1039 }
1040 }
1041
1042 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1043 {
1044 static DEFINE_MUTEX(kvm_debugfs_lock);
1045 struct dentry *dent;
1046 char dir_name[ITOA_MAX_LEN * 2];
1047 struct kvm_stat_data *stat_data;
1048 const struct _kvm_stats_desc *pdesc;
1049 int i, ret = -ENOMEM;
1050 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1051 kvm_vcpu_stats_header.num_desc;
1052
1053 if (!debugfs_initialized())
1054 return 0;
1055
1056 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1057 mutex_lock(&kvm_debugfs_lock);
1058 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1059 if (dent) {
1060 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1061 dput(dent);
1062 mutex_unlock(&kvm_debugfs_lock);
1063 return 0;
1064 }
1065 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1066 mutex_unlock(&kvm_debugfs_lock);
1067 if (IS_ERR(dent))
1068 return 0;
1069
1070 kvm->debugfs_dentry = dent;
1071 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1072 sizeof(*kvm->debugfs_stat_data),
1073 GFP_KERNEL_ACCOUNT);
1074 if (!kvm->debugfs_stat_data)
1075 goto out_err;
1076
1077 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1078 pdesc = &kvm_vm_stats_desc[i];
1079 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1080 if (!stat_data)
1081 goto out_err;
1082
1083 stat_data->kvm = kvm;
1084 stat_data->desc = pdesc;
1085 stat_data->kind = KVM_STAT_VM;
1086 kvm->debugfs_stat_data[i] = stat_data;
1087 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1088 kvm->debugfs_dentry, stat_data,
1089 &stat_fops_per_vm);
1090 }
1091
1092 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1093 pdesc = &kvm_vcpu_stats_desc[i];
1094 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1095 if (!stat_data)
1096 goto out_err;
1097
1098 stat_data->kvm = kvm;
1099 stat_data->desc = pdesc;
1100 stat_data->kind = KVM_STAT_VCPU;
1101 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1102 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1103 kvm->debugfs_dentry, stat_data,
1104 &stat_fops_per_vm);
1105 }
1106
1107 kvm_arch_create_vm_debugfs(kvm);
1108 return 0;
1109 out_err:
1110 kvm_destroy_vm_debugfs(kvm);
1111 return ret;
1112 }
1113
1114 /*
1115 * Called after the VM is otherwise initialized, but just before adding it to
1116 * the vm_list.
1117 */
1118 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1119 {
1120 return 0;
1121 }
1122
1123 /*
1124 * Called just after removing the VM from the vm_list, but before doing any
1125 * other destruction.
1126 */
1127 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1128 {
1129 }
1130
1131 /*
1132 * Called after the per-VM debugfs directory is created. kvm->debugfs_dentry
1133 * is already set up at this point, so arch-specific debugfs entries can be
1134 * created under it. Cleanup is done automatically and recursively by
1135 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
1136 */
1137 void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1138 {
1139 }
1140
1141 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1142 {
1143 struct kvm *kvm = kvm_arch_alloc_vm();
1144 struct kvm_memslots *slots;
1145 int r, i, j;
1146
1147 if (!kvm)
1148 return ERR_PTR(-ENOMEM);
1149
1150 KVM_MMU_LOCK_INIT(kvm);
1151 mmgrab(current->mm);
1152 kvm->mm = current->mm;
1153 kvm_eventfd_init(kvm);
1154 mutex_init(&kvm->lock);
1155 mutex_init(&kvm->irq_lock);
1156 mutex_init(&kvm->slots_lock);
1157 mutex_init(&kvm->slots_arch_lock);
1158 spin_lock_init(&kvm->mn_invalidate_lock);
1159 rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1160 xa_init(&kvm->vcpu_array);
1161 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1162 xa_init(&kvm->mem_attr_array);
1163 #endif
1164
1165 INIT_LIST_HEAD(&kvm->gpc_list);
1166 spin_lock_init(&kvm->gpc_lock);
1167
1168 INIT_LIST_HEAD(&kvm->devices);
1169 kvm->max_vcpus = KVM_MAX_VCPUS;
1170
1171 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1172
1173 /*
1174 * Force subsequent debugfs file creations to fail if the VM directory
1175 * is not created (by kvm_create_vm_debugfs()).
1176 */
1177 kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1178
1179 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1180 task_pid_nr(current));
1181
1182 r = -ENOMEM;
1183 if (init_srcu_struct(&kvm->srcu))
1184 goto out_err_no_srcu;
1185 if (init_srcu_struct(&kvm->irq_srcu))
1186 goto out_err_no_irq_srcu;
1187
1188 r = kvm_init_irq_routing(kvm);
1189 if (r)
1190 goto out_err_no_irq_routing;
1191
1192 refcount_set(&kvm->users_count, 1);
1193
1194 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1195 for (j = 0; j < 2; j++) {
1196 slots = &kvm->__memslots[i][j];
1197
1198 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1199 slots->hva_tree = RB_ROOT_CACHED;
1200 slots->gfn_tree = RB_ROOT;
1201 hash_init(slots->id_hash);
1202 slots->node_idx = j;
1203
1204 /* Generations must be different for each address space. */
1205 slots->generation = i;
1206 }
1207
1208 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1209 }
1210
1211 r = -ENOMEM;
1212 for (i = 0; i < KVM_NR_BUSES; i++) {
1213 rcu_assign_pointer(kvm->buses[i],
1214 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1215 if (!kvm->buses[i])
1216 goto out_err_no_arch_destroy_vm;
1217 }
1218
1219 r = kvm_arch_init_vm(kvm, type);
1220 if (r)
1221 goto out_err_no_arch_destroy_vm;
1222
1223 r = hardware_enable_all();
1224 if (r)
1225 goto out_err_no_disable;
1226
1227 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1228 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1229 #endif
1230
1231 r = kvm_init_mmu_notifier(kvm);
1232 if (r)
1233 goto out_err_no_mmu_notifier;
1234
1235 r = kvm_coalesced_mmio_init(kvm);
1236 if (r < 0)
1237 goto out_no_coalesced_mmio;
1238
1239 r = kvm_create_vm_debugfs(kvm, fdname);
1240 if (r)
1241 goto out_err_no_debugfs;
1242
1243 r = kvm_arch_post_init_vm(kvm);
1244 if (r)
1245 goto out_err;
1246
1247 mutex_lock(&kvm_lock);
1248 list_add(&kvm->vm_list, &vm_list);
1249 mutex_unlock(&kvm_lock);
1250
1251 preempt_notifier_inc();
1252 kvm_init_pm_notifier(kvm);
1253
1254 return kvm;
1255
1256 out_err:
1257 kvm_destroy_vm_debugfs(kvm);
1258 out_err_no_debugfs:
1259 kvm_coalesced_mmio_free(kvm);
1260 out_no_coalesced_mmio:
1261 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1262 if (kvm->mmu_notifier.ops)
1263 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1264 #endif
1265 out_err_no_mmu_notifier:
1266 hardware_disable_all();
1267 out_err_no_disable:
1268 kvm_arch_destroy_vm(kvm);
1269 out_err_no_arch_destroy_vm:
1270 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1271 for (i = 0; i < KVM_NR_BUSES; i++)
1272 kfree(kvm_get_bus(kvm, i));
1273 kvm_free_irq_routing(kvm);
1274 out_err_no_irq_routing:
1275 cleanup_srcu_struct(&kvm->irq_srcu);
1276 out_err_no_irq_srcu:
1277 cleanup_srcu_struct(&kvm->srcu);
1278 out_err_no_srcu:
1279 kvm_arch_free_vm(kvm);
1280 mmdrop(current->mm);
1281 return ERR_PTR(r);
1282 }
1283
1284 static void kvm_destroy_devices(struct kvm *kvm)
1285 {
1286 struct kvm_device *dev, *tmp;
1287
1288 /*
1289 * We do not need to take the kvm->lock here, because nobody else
1290 * has a reference to the struct kvm at this point and therefore
1291 * cannot access the devices list anyhow.
1292 *
1293 * The device list is generally managed as an rculist, but list_del()
1294 * is used intentionally here. If a bug in KVM introduced a reader that
1295 * was not backed by a reference on the kvm struct, the hope is that
1296 * it'd consume the poisoned forward pointer instead of suffering a
1297 * use-after-free, even though this cannot be guaranteed.
1298 */
1299 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1300 list_del(&dev->vm_node);
1301 dev->ops->destroy(dev);
1302 }
1303 }
1304
1305 static void kvm_destroy_vm(struct kvm *kvm)
1306 {
1307 int i;
1308 struct mm_struct *mm = kvm->mm;
1309
1310 kvm_destroy_pm_notifier(kvm);
1311 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1312 kvm_destroy_vm_debugfs(kvm);
1313 kvm_arch_sync_events(kvm);
1314 mutex_lock(&kvm_lock);
1315 list_del(&kvm->vm_list);
1316 mutex_unlock(&kvm_lock);
1317 kvm_arch_pre_destroy_vm(kvm);
1318
1319 kvm_free_irq_routing(kvm);
1320 for (i = 0; i < KVM_NR_BUSES; i++) {
1321 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1322
1323 if (bus)
1324 kvm_io_bus_destroy(bus);
1325 kvm->buses[i] = NULL;
1326 }
1327 kvm_coalesced_mmio_free(kvm);
1328 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1329 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1330 /*
1331 * At this point, pending calls to invalidate_range_start()
1332 * have completed but no more MMU notifiers will run, so
1333 * mn_active_invalidate_count may remain unbalanced.
1334 * No threads can be waiting in kvm_swap_active_memslots() as the
1335 * last reference on KVM has been dropped, but freeing
1336 * memslots would deadlock without this manual intervention.
1337 *
1338 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1339 * notifier between a start() and end(), then there shouldn't be any
1340 * in-progress invalidations.
1341 */
1342 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1343 if (kvm->mn_active_invalidate_count)
1344 kvm->mn_active_invalidate_count = 0;
1345 else
1346 WARN_ON(kvm->mmu_invalidate_in_progress);
1347 #else
1348 kvm_flush_shadow_all(kvm);
1349 #endif
1350 kvm_arch_destroy_vm(kvm);
1351 kvm_destroy_devices(kvm);
1352 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1353 kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1354 kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1355 }
1356 cleanup_srcu_struct(&kvm->irq_srcu);
1357 cleanup_srcu_struct(&kvm->srcu);
1358 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1359 xa_destroy(&kvm->mem_attr_array);
1360 #endif
1361 kvm_arch_free_vm(kvm);
1362 preempt_notifier_dec();
1363 hardware_disable_all();
1364 mmdrop(mm);
1365 }
1366
1367 void kvm_get_kvm(struct kvm *kvm)
1368 {
1369 refcount_inc(&kvm->users_count);
1370 }
1371 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1372
1373 /*
1374 * Make sure the vm is not being destroyed; this is a safe version of
1375 * kvm_get_kvm(). Returns true if kvm was referenced successfully, false otherwise.
1376 */
1377 bool kvm_get_kvm_safe(struct kvm *kvm)
1378 {
1379 return refcount_inc_not_zero(&kvm->users_count);
1380 }
1381 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
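/*
 * Typical use of the _safe variant (illustrative): a context that merely
 * holds a kvm pointer, e.g. a debugfs reader, takes a reference and bails out
 * if the VM is already being torn down:
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return -ENOENT;
 *	...
 *	kvm_put_kvm(kvm);
 */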
1382
1383 void kvm_put_kvm(struct kvm *kvm)
1384 {
1385 if (refcount_dec_and_test(&kvm->users_count))
1386 kvm_destroy_vm(kvm);
1387 }
1388 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1389
1390 /*
1391 * Used to put a reference that was taken on behalf of an object associated
1392 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1393 * of the new file descriptor fails and the reference cannot be transferred to
1394 * its final owner. In such cases, the caller is still actively using @kvm and
1395 * will fail miserably if the refcount unexpectedly hits zero.
1396 */
1397 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1398 {
1399 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1400 }
1401 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1402
1403 static int kvm_vm_release(struct inode *inode, struct file *filp)
1404 {
1405 struct kvm *kvm = filp->private_data;
1406
1407 kvm_irqfd_release(kvm);
1408
1409 kvm_put_kvm(kvm);
1410 return 0;
1411 }
1412
1413 /*
1414 * Allocation size is twice as large as the actual dirty bitmap size.
1415 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
1416 */
1417 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1418 {
1419 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1420
1421 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1422 if (!memslot->dirty_bitmap)
1423 return -ENOMEM;
1424
1425 return 0;
1426 }
1427
1428 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1429 {
1430 struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1431 int node_idx_inactive = active->node_idx ^ 1;
1432
1433 return &kvm->__memslots[as_id][node_idx_inactive];
1434 }
1435
1436 /*
1437 * Helper to get the address space ID when one of the memslot pointers may be NULL.
1438 * This also serves as a sanity check that at least one of the pointers is non-NULL,
1439 * and that their address space IDs don't diverge.
1440 */
1441 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1442 struct kvm_memory_slot *b)
1443 {
1444 if (WARN_ON_ONCE(!a && !b))
1445 return 0;
1446
1447 if (!a)
1448 return b->as_id;
1449 if (!b)
1450 return a->as_id;
1451
1452 WARN_ON_ONCE(a->as_id != b->as_id);
1453 return a->as_id;
1454 }
1455
1456 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1457 struct kvm_memory_slot *slot)
1458 {
1459 struct rb_root *gfn_tree = &slots->gfn_tree;
1460 struct rb_node **node, *parent;
1461 int idx = slots->node_idx;
1462
1463 parent = NULL;
1464 for (node = &gfn_tree->rb_node; *node; ) {
1465 struct kvm_memory_slot *tmp;
1466
1467 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1468 parent = *node;
1469 if (slot->base_gfn < tmp->base_gfn)
1470 node = &(*node)->rb_left;
1471 else if (slot->base_gfn > tmp->base_gfn)
1472 node = &(*node)->rb_right;
1473 else
1474 BUG();
1475 }
1476
1477 rb_link_node(&slot->gfn_node[idx], parent, node);
1478 rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1479 }
1480
1481 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1482 struct kvm_memory_slot *slot)
1483 {
1484 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1485 }
1486
1487 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1488 struct kvm_memory_slot *old,
1489 struct kvm_memory_slot *new)
1490 {
1491 int idx = slots->node_idx;
1492
1493 WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1494
1495 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1496 &slots->gfn_tree);
1497 }
1498
1499 /*
1500 * Replace @old with @new in the inactive memslots.
1501 *
1502 * With NULL @old this simply adds @new.
1503 * With NULL @new this simply removes @old.
1504 *
1505 * If @new is non-NULL its hva_node[slots_idx] range has to be set
1506 * appropriately.
1507 */
1508 static void kvm_replace_memslot(struct kvm *kvm,
1509 struct kvm_memory_slot *old,
1510 struct kvm_memory_slot *new)
1511 {
1512 int as_id = kvm_memslots_get_as_id(old, new);
1513 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1514 int idx = slots->node_idx;
1515
1516 if (old) {
1517 hash_del(&old->id_node[idx]);
1518 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1519
1520 if ((long)old == atomic_long_read(&slots->last_used_slot))
1521 atomic_long_set(&slots->last_used_slot, (long)new);
1522
1523 if (!new) {
1524 kvm_erase_gfn_node(slots, old);
1525 return;
1526 }
1527 }
1528
1529 /*
1530 * Initialize @new's hva range. Do this even when replacing an @old
1531 * slot; kvm_copy_memslot() deliberately does not touch node data.
1532 */
1533 new->hva_node[idx].start = new->userspace_addr;
1534 new->hva_node[idx].last = new->userspace_addr +
1535 (new->npages << PAGE_SHIFT) - 1;
1536
1537 /*
1538 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), so
1539 * hva_node needs to be swapped with remove+insert even though hva can't
1540 * change when replacing an existing slot.
1541 */
1542 hash_add(slots->id_hash, &new->id_node[idx], new->id);
1543 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1544
1545 /*
1546 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1547 * switch the node in the gfn tree instead of removing the old and
1548 * inserting the new as two separate operations. Replacement is a
1549 * single O(1) operation versus two O(log(n)) operations for
1550 * remove+insert.
1551 */
1552 if (old && old->base_gfn == new->base_gfn) {
1553 kvm_replace_gfn_node(slots, old, new);
1554 } else {
1555 if (old)
1556 kvm_erase_gfn_node(slots, old);
1557 kvm_insert_gfn_node(slots, new);
1558 }
1559 }
1560
1561 /*
1562 * Flags that do not access any of the extra space of struct
1563 * kvm_userspace_memory_region2. KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1564 * only allows these.
1565 */
1566 #define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1567 (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1568
1569 static int check_memory_region_flags(struct kvm *kvm,
1570 const struct kvm_userspace_memory_region2 *mem)
1571 {
1572 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1573
1574 if (kvm_arch_has_private_mem(kvm))
1575 valid_flags |= KVM_MEM_GUEST_MEMFD;
1576
1577 /* Dirty logging private memory is not currently supported. */
1578 if (mem->flags & KVM_MEM_GUEST_MEMFD)
1579 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1580
1581 /*
1582 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1583 * read-only memslots have emulated MMIO, not page fault, semantics,
1584 * and KVM doesn't allow emulated MMIO for private memory.
1585 */
1586 if (kvm_arch_has_readonly_mem(kvm) &&
1587 !(mem->flags & KVM_MEM_GUEST_MEMFD))
1588 valid_flags |= KVM_MEM_READONLY;
1589
1590 if (mem->flags & ~valid_flags)
1591 return -EINVAL;
1592
1593 return 0;
1594 }
1595
1596 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1597 {
1598 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1599
1600 /* Grab the generation from the currently active memslots. */
1601 u64 gen = __kvm_memslots(kvm, as_id)->generation;
1602
1603 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1604 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1605
1606 /*
1607 * Do not store the new memslots while there are invalidations in
1608 * progress, otherwise the locking in invalidate_range_start and
1609 * invalidate_range_end will be unbalanced.
1610 */
1611 spin_lock(&kvm->mn_invalidate_lock);
1612 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1613 while (kvm->mn_active_invalidate_count) {
1614 set_current_state(TASK_UNINTERRUPTIBLE);
1615 spin_unlock(&kvm->mn_invalidate_lock);
1616 schedule();
1617 spin_lock(&kvm->mn_invalidate_lock);
1618 }
1619 finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1620 rcu_assign_pointer(kvm->memslots[as_id], slots);
1621 spin_unlock(&kvm->mn_invalidate_lock);
1622
1623 /*
1624 * Acquired in kvm_set_memslot. Must be released before the SRCU
1625 * synchronization below in order to avoid deadlock with another thread
1626 * acquiring the slots_arch_lock in an srcu critical section.
1627 */
1628 mutex_unlock(&kvm->slots_arch_lock);
1629
1630 synchronize_srcu_expedited(&kvm->srcu);
1631
1632 /*
1633 * Increment the new memslot generation a second time, dropping the
1634 * update in-progress flag and incrementing the generation based on
1635 * the number of address spaces. This provides a unique and easily
1636 * identifiable generation number while the memslots are in flux.
1637 */
1638 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1639
1640 /*
1641 * Generations must be unique even across address spaces. We do not need
1642 * a global counter for that; instead, the generation space is evenly split
1643 * across address spaces. For example, with two address spaces, address
1644 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1645 * use generations 1, 3, 5, ...
1646 */
1647 gen += kvm_arch_nr_memslot_as_ids(kvm);
1648
1649 kvm_arch_memslots_updated(kvm, gen);
1650
1651 slots->generation = gen;
1652 }
1653
1654 static int kvm_prepare_memory_region(struct kvm *kvm,
1655 const struct kvm_memory_slot *old,
1656 struct kvm_memory_slot *new,
1657 enum kvm_mr_change change)
1658 {
1659 int r;
1660
1661 /*
1662 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1663 * will be freed on "commit". If logging is enabled in both old and
1664 * new, reuse the existing bitmap. If logging is enabled only in the
1665 * new and KVM isn't using a ring buffer, allocate and initialize a
1666 * new bitmap.
1667 */
1668 if (change != KVM_MR_DELETE) {
1669 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1670 new->dirty_bitmap = NULL;
1671 else if (old && old->dirty_bitmap)
1672 new->dirty_bitmap = old->dirty_bitmap;
1673 else if (kvm_use_dirty_bitmap(kvm)) {
1674 r = kvm_alloc_dirty_bitmap(new);
1675 if (r)
1676 return r;
1677
1678 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1679 bitmap_set(new->dirty_bitmap, 0, new->npages);
1680 }
1681 }
1682
1683 r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1684
1685 /* Free the bitmap on failure if it was allocated above. */
1686 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1687 kvm_destroy_dirty_bitmap(new);
1688
1689 return r;
1690 }
1691
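/*
 * Commit an already-prepared change: update the page and dirty-logging
 * accounting, invoke the arch commit hook, and free whatever parts of the old
 * slot are no longer reachable for this type of change.
 */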
1692 static void kvm_commit_memory_region(struct kvm *kvm,
1693 struct kvm_memory_slot *old,
1694 const struct kvm_memory_slot *new,
1695 enum kvm_mr_change change)
1696 {
1697 int old_flags = old ? old->flags : 0;
1698 int new_flags = new ? new->flags : 0;
1699 /*
1700 * Update the total number of memslot pages before calling the arch
1701 * hook so that architectures can consume the result directly.
1702 */
1703 if (change == KVM_MR_DELETE)
1704 kvm->nr_memslot_pages -= old->npages;
1705 else if (change == KVM_MR_CREATE)
1706 kvm->nr_memslot_pages += new->npages;
1707
1708 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1709 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1710 atomic_set(&kvm->nr_memslots_dirty_logging,
1711 atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1712 }
1713
1714 kvm_arch_commit_memory_region(kvm, old, new, change);
1715
1716 switch (change) {
1717 case KVM_MR_CREATE:
1718 /* Nothing more to do. */
1719 break;
1720 case KVM_MR_DELETE:
1721 /* Free the old memslot and all its metadata. */
1722 kvm_free_memslot(kvm, old);
1723 break;
1724 case KVM_MR_MOVE:
1725 case KVM_MR_FLAGS_ONLY:
1726 /*
1727 * Free the dirty bitmap as needed; the below check encompasses
1728 * both the flags and whether a ring buffer is being used.
1729 */
1730 if (old->dirty_bitmap && !new->dirty_bitmap)
1731 kvm_destroy_dirty_bitmap(old);
1732
1733 /*
1734 * The final quirk. Free the detached, old slot, but only its
1735 * memory, not any metadata. Metadata, including arch specific
1736 * data, may be reused by @new.
1737 */
1738 kfree(old);
1739 break;
1740 default:
1741 BUG();
1742 }
1743 }
1744
1745 /*
1746 * Activate @new, which must be installed in the inactive slots by the caller,
1747 * by swapping the active slots and then propagating @new to @old once @old is
1748 * unreachable and can be safely modified.
1749 *
1750 * With NULL @old this simply adds @new to @active (while swapping the sets).
1751 * With NULL @new this simply removes @old from @active and frees it
1752 * (while also swapping the sets).
1753 */
1754 static void kvm_activate_memslot(struct kvm *kvm,
1755 struct kvm_memory_slot *old,
1756 struct kvm_memory_slot *new)
1757 {
1758 int as_id = kvm_memslots_get_as_id(old, new);
1759
1760 kvm_swap_active_memslots(kvm, as_id);
1761
1762 /* Propagate the new memslot to the now inactive memslots. */
1763 kvm_replace_memslot(kvm, old, new);
1764 }
1765
1766 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1767 const struct kvm_memory_slot *src)
1768 {
1769 dest->base_gfn = src->base_gfn;
1770 dest->npages = src->npages;
1771 dest->dirty_bitmap = src->dirty_bitmap;
1772 dest->arch = src->arch;
1773 dest->userspace_addr = src->userspace_addr;
1774 dest->flags = src->flags;
1775 dest->id = src->id;
1776 dest->as_id = src->as_id;
1777 }
1778
1779 static void kvm_invalidate_memslot(struct kvm *kvm,
1780 struct kvm_memory_slot *old,
1781 struct kvm_memory_slot *invalid_slot)
1782 {
1783 /*
1784 * Mark the current slot INVALID. As with all memslot modifications,
1785 * this must be done on an unreachable slot to avoid modifying the
1786 * current slot in the active tree.
1787 */
1788 kvm_copy_memslot(invalid_slot, old);
1789 invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1790 kvm_replace_memslot(kvm, old, invalid_slot);
1791
1792 /*
1793 * Activate the slot that is now marked INVALID, but don't propagate
1794 * the slot to the now inactive slots. The slot is either going to be
1795 * deleted or recreated as a new slot.
1796 */
1797 kvm_swap_active_memslots(kvm, old->as_id);
1798
1799 /*
1800 * From this point no new shadow pages pointing to a deleted, or moved,
1801 * memslot will be created. Validation of sp->gfn happens in:
1802 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1803 * - kvm_is_visible_gfn (mmu_check_root)
1804 */
1805 kvm_arch_flush_shadow_memslot(kvm, old);
1806 kvm_arch_guest_memory_reclaimed(kvm);
1807
1808 /* Was released by kvm_swap_active_memslots(), reacquire. */
1809 mutex_lock(&kvm->slots_arch_lock);
1810
1811 /*
1812 * Copy the arch-specific field of the newly-installed slot back to the
1813 * old slot as the arch data could have changed between releasing
1814 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1815 * above. Writers are required to retrieve memslots *after* acquiring
1816 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1817 */
1818 old->arch = invalid_slot->arch;
1819 }
1820
1821 static void kvm_create_memslot(struct kvm *kvm,
1822 struct kvm_memory_slot *new)
1823 {
1824 /* Add the new memslot to the inactive set and activate. */
1825 kvm_replace_memslot(kvm, NULL, new);
1826 kvm_activate_memslot(kvm, NULL, new);
1827 }
1828
1829 static void kvm_delete_memslot(struct kvm *kvm,
1830 struct kvm_memory_slot *old,
1831 struct kvm_memory_slot *invalid_slot)
1832 {
1833 /*
1834 * Remove the old memslot (in the inactive memslots) by passing NULL as the
1835 * "new" slot, and likewise remove the invalid version from the active slots.
1836 */
1837 kvm_replace_memslot(kvm, old, NULL);
1838 kvm_activate_memslot(kvm, invalid_slot, NULL);
1839 }
1840
1841 static void kvm_move_memslot(struct kvm *kvm,
1842 struct kvm_memory_slot *old,
1843 struct kvm_memory_slot *new,
1844 struct kvm_memory_slot *invalid_slot)
1845 {
1846 /*
1847 * Replace the old memslot in the inactive slots, and then swap slots
1848 * and replace the current INVALID with the new as well.
1849 */
1850 kvm_replace_memslot(kvm, old, new);
1851 kvm_activate_memslot(kvm, invalid_slot, new);
1852 }
1853
1854 static void kvm_update_flags_memslot(struct kvm *kvm,
1855 struct kvm_memory_slot *old,
1856 struct kvm_memory_slot *new)
1857 {
1858 /*
1859 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1860 * an intermediate step. Instead, the old memslot is simply replaced
1861 * with a new, updated copy in both memslot sets.
1862 */
1863 kvm_replace_memslot(kvm, old, new);
1864 kvm_activate_memslot(kvm, old, new);
1865 }
1866
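/*
 * Core memslot update path: take slots_arch_lock, swap in an INVALID copy of
 * the slot for DELETE/MOVE, let the architecture prepare the change, install
 * the new slot by swapping the active and inactive memslot sets, and finally
 * commit (accounting plus the arch commit hook).  Each swap bumps the
 * generation and synchronizes SRCU so that no reader still sees the old set.
 */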
1867 static int kvm_set_memslot(struct kvm *kvm,
1868 struct kvm_memory_slot *old,
1869 struct kvm_memory_slot *new,
1870 enum kvm_mr_change change)
1871 {
1872 struct kvm_memory_slot *invalid_slot;
1873 int r;
1874
1875 /*
1876 * Released in kvm_swap_active_memslots().
1877 *
1878 * Must be held from before the current memslots are copied until after
1879 * the new memslots are installed with rcu_assign_pointer, then
1880 * released before the synchronize srcu in kvm_swap_active_memslots().
1881 *
1882 * When modifying memslots outside of the slots_lock, must be held
1883 * before reading the pointer to the current memslots until after all
1884 * changes to those memslots are complete.
1885 *
1886 * These rules ensure that installing new memslots does not lose
1887 * changes made to the previous memslots.
1888 */
1889 mutex_lock(&kvm->slots_arch_lock);
1890
1891 /*
1892 * Invalidate the old slot if it's being deleted or moved. This is
1893 * done prior to actually deleting/moving the memslot to allow vCPUs to
1894 * continue running by ensuring there are no mappings or shadow pages
1895 * for the memslot when it is deleted/moved. Without pre-invalidation
1896 * (and without a lock), a window would exist between effecting the
1897 * delete/move and committing the changes in arch code where KVM or a
1898 * guest could access a non-existent memslot.
1899 *
1900 * Modifications are done on a temporary, unreachable slot. The old
1901 * slot needs to be preserved in case a later step fails and the
1902 * invalidation needs to be reverted.
1903 */
1904 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1905 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1906 if (!invalid_slot) {
1907 mutex_unlock(&kvm->slots_arch_lock);
1908 return -ENOMEM;
1909 }
1910 kvm_invalidate_memslot(kvm, old, invalid_slot);
1911 }
1912
1913 r = kvm_prepare_memory_region(kvm, old, new, change);
1914 if (r) {
1915 /*
1916 * For DELETE/MOVE, revert the above INVALID change. No
1917 * modifications required since the original slot was preserved
1918 * in the inactive slots. Changing the active memslots also
1919 * releases slots_arch_lock.
1920 */
1921 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1922 kvm_activate_memslot(kvm, invalid_slot, old);
1923 kfree(invalid_slot);
1924 } else {
1925 mutex_unlock(&kvm->slots_arch_lock);
1926 }
1927 return r;
1928 }
1929
1930 /*
1931 * For DELETE and MOVE, the temporary slot (invalid_slot) is now active as
1932 * the INVALID version of the old slot. MOVE is particularly special as it
1933 * reuses the old slot and returns a copy of the old slot (in invalid_slot).
1934 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
1935 * old slot is detached but otherwise preserved.
1936 */
1937 if (change == KVM_MR_CREATE)
1938 kvm_create_memslot(kvm, new);
1939 else if (change == KVM_MR_DELETE)
1940 kvm_delete_memslot(kvm, old, invalid_slot);
1941 else if (change == KVM_MR_MOVE)
1942 kvm_move_memslot(kvm, old, new, invalid_slot);
1943 else if (change == KVM_MR_FLAGS_ONLY)
1944 kvm_update_flags_memslot(kvm, old, new);
1945 else
1946 BUG();
1947
1948 /* Free the temporary INVALID slot used for DELETE and MOVE. */
1949 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1950 kfree(invalid_slot);
1951
1952 /*
1953 * No need to refresh new->arch, changes after dropping slots_arch_lock
1954 * will directly hit the final, active memslot. Architectures are
1955 * responsible for knowing that new->arch may be stale.
1956 */
1957 kvm_commit_memory_region(kvm, old, new, change);
1958
1959 return 0;
1960 }
1961
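/* Returns true if any slot other than @id overlaps the gfn range [@start, @end). */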
1962 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1963 gfn_t start, gfn_t end)
1964 {
1965 struct kvm_memslot_iter iter;
1966
1967 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1968 if (iter.slot->id != id)
1969 return true;
1970 }
1971
1972 return false;
1973 }
1974
1975 /*
1976 * Allocate some memory and give it an address in the guest physical address
1977 * space.
1978 *
1979 * Discontiguous memory is allowed, mostly for framebuffers.
1980 *
1981 * Must be called holding kvm->slots_lock for write.
1982 */
1983 int __kvm_set_memory_region(struct kvm *kvm,
1984 const struct kvm_userspace_memory_region2 *mem)
1985 {
1986 struct kvm_memory_slot *old, *new;
1987 struct kvm_memslots *slots;
1988 enum kvm_mr_change change;
1989 unsigned long npages;
1990 gfn_t base_gfn;
1991 int as_id, id;
1992 int r;
1993
1994 r = check_memory_region_flags(kvm, mem);
1995 if (r)
1996 return r;
1997
1998 as_id = mem->slot >> 16;
1999 id = (u16)mem->slot;
2000
2001 /* General sanity checks */
2002 if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2003 (mem->memory_size != (unsigned long)mem->memory_size))
2004 return -EINVAL;
2005 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2006 return -EINVAL;
2007 /* We can read the guest memory with __xxx_user() later on. */
2008 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2009 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2010 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2011 mem->memory_size))
2012 return -EINVAL;
2013 if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2014 (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2015 mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2016 return -EINVAL;
2017 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2018 return -EINVAL;
2019 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2020 return -EINVAL;
2021 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2022 return -EINVAL;
2023
2024 slots = __kvm_memslots(kvm, as_id);
2025
2026 /*
2027 * Note, the old memslot (and the pointer itself!) may be invalidated
2028 * and/or destroyed by kvm_set_memslot().
2029 */
2030 old = id_to_memslot(slots, id);
2031
2032 if (!mem->memory_size) {
2033 if (!old || !old->npages)
2034 return -EINVAL;
2035
2036 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2037 return -EIO;
2038
2039 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2040 }
2041
2042 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2043 npages = (mem->memory_size >> PAGE_SHIFT);
2044
2045 if (!old || !old->npages) {
2046 change = KVM_MR_CREATE;
2047
2048 /*
2049 * To simplify KVM internals, the total number of pages across
2050 * all memslots must fit in an unsigned long.
2051 */
2052 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2053 return -EINVAL;
2054 } else { /* Modify an existing slot. */
2055 /* Private memslots are immutable; they can only be deleted. */
2056 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2057 return -EINVAL;
2058 if ((mem->userspace_addr != old->userspace_addr) ||
2059 (npages != old->npages) ||
2060 ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2061 return -EINVAL;
2062
2063 if (base_gfn != old->base_gfn)
2064 change = KVM_MR_MOVE;
2065 else if (mem->flags != old->flags)
2066 change = KVM_MR_FLAGS_ONLY;
2067 else /* Nothing to change. */
2068 return 0;
2069 }
2070
2071 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2072 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2073 return -EEXIST;
2074
2075 /* Allocate a slot that will persist in the memslot. */
2076 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2077 if (!new)
2078 return -ENOMEM;
2079
2080 new->as_id = as_id;
2081 new->id = id;
2082 new->base_gfn = base_gfn;
2083 new->npages = npages;
2084 new->flags = mem->flags;
2085 new->userspace_addr = mem->userspace_addr;
2086 if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2087 r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2088 if (r)
2089 goto out;
2090 }
2091
2092 r = kvm_set_memslot(kvm, old, new, change);
2093 if (r)
2094 goto out_unbind;
2095
2096 return 0;
2097
2098 out_unbind:
2099 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2100 kvm_gmem_unbind(new);
2101 out:
2102 kfree(new);
2103 return r;
2104 }
2105 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2106
2107 int kvm_set_memory_region(struct kvm *kvm,
2108 const struct kvm_userspace_memory_region2 *mem)
2109 {
2110 int r;
2111
2112 mutex_lock(&kvm->slots_lock);
2113 r = __kvm_set_memory_region(kvm, mem);
2114 mutex_unlock(&kvm->slots_lock);
2115 return r;
2116 }
2117 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
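/*
 * Illustrative sketch, not part of the original source: an in-kernel caller
 * creates a slot by filling a struct kvm_userspace_memory_region2 (and can
 * delete it again by passing memory_size == 0), with the address space id
 * encoded in the upper 16 bits of ->slot:
 *
 *	struct kvm_userspace_memory_region2 mem = {
 *		.slot            = (as_id << 16) | id,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = gpa,
 *		.memory_size     = size,
 *		.userspace_addr  = hva,
 *	};
 *
 *	r = kvm_set_memory_region(kvm, &mem);
 *
 * gpa, size and hva must all be page aligned, and the new gfn range must not
 * overlap any other slot in the same address space.
 */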
2118
2119 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2120 struct kvm_userspace_memory_region2 *mem)
2121 {
2122 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2123 return -EINVAL;
2124
2125 return kvm_set_memory_region(kvm, mem);
2126 }
2127
2128 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2129 /**
2130 * kvm_get_dirty_log - get a snapshot of dirty pages
2131 * @kvm: pointer to kvm instance
2132 * @log: slot id and address to which we copy the log
2133 * @is_dirty: set to '1' if any dirty pages were found
2134 * @memslot: set to the associated memslot, always valid on success
2135 */
2136 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2137 int *is_dirty, struct kvm_memory_slot **memslot)
2138 {
2139 struct kvm_memslots *slots;
2140 int i, as_id, id;
2141 unsigned long n;
2142 unsigned long any = 0;
2143
2144 /* Dirty ring tracking may be exclusive to dirty log tracking */
2145 if (!kvm_use_dirty_bitmap(kvm))
2146 return -ENXIO;
2147
2148 *memslot = NULL;
2149 *is_dirty = 0;
2150
2151 as_id = log->slot >> 16;
2152 id = (u16)log->slot;
2153 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2154 return -EINVAL;
2155
2156 slots = __kvm_memslots(kvm, as_id);
2157 *memslot = id_to_memslot(slots, id);
2158 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2159 return -ENOENT;
2160
2161 kvm_arch_sync_dirty_log(kvm, *memslot);
2162
2163 n = kvm_dirty_bitmap_bytes(*memslot);
2164
2165 for (i = 0; !any && i < n/sizeof(long); ++i)
2166 any = (*memslot)->dirty_bitmap[i];
2167
2168 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2169 return -EFAULT;
2170
2171 if (any)
2172 *is_dirty = 1;
2173 return 0;
2174 }
2175 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2176
2177 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2178 /**
2179 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2180 * and reenable dirty page tracking for the corresponding pages.
2181 * @kvm: pointer to kvm instance
2182 * @log: slot id and address to which we copy the log
2183 *
2184 * We need to keep it in mind that VCPU threads can write to the bitmap
2185 * concurrently. So, to avoid losing track of dirty pages we keep the
2186 * following order:
2187 *
2188 * 1. Take a snapshot of the bit and clear it if needed.
2189 * 2. Write protect the corresponding page.
2190 * 3. Copy the snapshot to the userspace.
2191 * 4. Upon return caller flushes TLB's if needed.
2192 *
2193 * Between 2 and 4, the guest may write to the page using the remaining TLB
2194 * entry. This is not a problem because the page is reported dirty using
2195 * the snapshot taken before and step 4 ensures that writes done after
2196 * exiting to userspace will be logged for the next call.
2197 *
2198 */
2199 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2200 {
2201 struct kvm_memslots *slots;
2202 struct kvm_memory_slot *memslot;
2203 int i, as_id, id;
2204 unsigned long n;
2205 unsigned long *dirty_bitmap;
2206 unsigned long *dirty_bitmap_buffer;
2207 bool flush;
2208
2209 /* Dirty ring tracking may be exclusive to dirty log tracking */
2210 if (!kvm_use_dirty_bitmap(kvm))
2211 return -ENXIO;
2212
2213 as_id = log->slot >> 16;
2214 id = (u16)log->slot;
2215 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2216 return -EINVAL;
2217
2218 slots = __kvm_memslots(kvm, as_id);
2219 memslot = id_to_memslot(slots, id);
2220 if (!memslot || !memslot->dirty_bitmap)
2221 return -ENOENT;
2222
2223 dirty_bitmap = memslot->dirty_bitmap;
2224
2225 kvm_arch_sync_dirty_log(kvm, memslot);
2226
2227 n = kvm_dirty_bitmap_bytes(memslot);
2228 flush = false;
2229 if (kvm->manual_dirty_log_protect) {
2230 /*
2231 * Unlike kvm_get_dirty_log, we always return false in *flush,
2232 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
2233 * is some code duplication between this function and
2234 * kvm_get_dirty_log, but hopefully all architectures transition
2235 * to kvm_get_dirty_log_protect so that kvm_get_dirty_log can be
2236 * eliminated.
2237 */
2238 dirty_bitmap_buffer = dirty_bitmap;
2239 } else {
2240 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2241 memset(dirty_bitmap_buffer, 0, n);
2242
2243 KVM_MMU_LOCK(kvm);
2244 for (i = 0; i < n / sizeof(long); i++) {
2245 unsigned long mask;
2246 gfn_t offset;
2247
2248 if (!dirty_bitmap[i])
2249 continue;
2250
2251 flush = true;
2252 mask = xchg(&dirty_bitmap[i], 0);
2253 dirty_bitmap_buffer[i] = mask;
2254
2255 offset = i * BITS_PER_LONG;
2256 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2257 offset, mask);
2258 }
2259 KVM_MMU_UNLOCK(kvm);
2260 }
2261
2262 if (flush)
2263 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2264
2265 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2266 return -EFAULT;
2267 return 0;
2268 }
2269
2270
2271 /**
2272 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2273 * @kvm: kvm instance
2274 * @log: slot id and address to which we copy the log
2275 *
2276 * Steps 1-4 below provide a general overview of dirty page logging. See
2277 * kvm_get_dirty_log_protect() function description for additional details.
2278 *
2279 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
2280 * always flush the TLB (step 4) even if previous step failed and the dirty
2281 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2282 * API does not preclude a subsequent dirty log read by user space. Flushing
2283 * the TLB ensures writes will be marked dirty for the next log read.
2284 *
2285 * 1. Take a snapshot of the bit and clear it if needed.
2286 * 2. Write protect the corresponding page.
2287 * 3. Copy the snapshot to the userspace.
2288 * 4. Flush TLB's if needed.
2289 */
2290 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2291 struct kvm_dirty_log *log)
2292 {
2293 int r;
2294
2295 mutex_lock(&kvm->slots_lock);
2296
2297 r = kvm_get_dirty_log_protect(kvm, log);
2298
2299 mutex_unlock(&kvm->slots_lock);
2300 return r;
2301 }
2302
2303 /**
2304 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2305 * and reenable dirty page tracking for the corresponding pages.
2306 * @kvm: pointer to kvm instance
2307 * @log: slot id and address from which to fetch the bitmap of dirty pages
2308 */
2309 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2310 struct kvm_clear_dirty_log *log)
2311 {
2312 struct kvm_memslots *slots;
2313 struct kvm_memory_slot *memslot;
2314 int as_id, id;
2315 gfn_t offset;
2316 unsigned long i, n;
2317 unsigned long *dirty_bitmap;
2318 unsigned long *dirty_bitmap_buffer;
2319 bool flush;
2320
2321 /* Dirty ring tracking may be exclusive to dirty log tracking */
2322 if (!kvm_use_dirty_bitmap(kvm))
2323 return -ENXIO;
2324
2325 as_id = log->slot >> 16;
2326 id = (u16)log->slot;
2327 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2328 return -EINVAL;
2329
2330 if (log->first_page & 63)
2331 return -EINVAL;
2332
2333 slots = __kvm_memslots(kvm, as_id);
2334 memslot = id_to_memslot(slots, id);
2335 if (!memslot || !memslot->dirty_bitmap)
2336 return -ENOENT;
2337
2338 dirty_bitmap = memslot->dirty_bitmap;
2339
2340 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2341
2342 if (log->first_page > memslot->npages ||
2343 log->num_pages > memslot->npages - log->first_page ||
2344 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2345 return -EINVAL;
2346
2347 kvm_arch_sync_dirty_log(kvm, memslot);
2348
2349 flush = false;
2350 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2351 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2352 return -EFAULT;
2353
2354 KVM_MMU_LOCK(kvm);
2355 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2356 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2357 i++, offset += BITS_PER_LONG) {
2358 unsigned long mask = *dirty_bitmap_buffer++;
2359 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2360 if (!mask)
2361 continue;
2362
2363 mask &= atomic_long_fetch_andnot(mask, p);
2364
2365 /*
2366 * mask contains the bits that really have been cleared. This
2367 * never includes any bits beyond the length of the memslot (if
2368 * the length is not aligned to 64 pages), therefore it is not
2369 * a problem if userspace sets them in log->dirty_bitmap.
2370 */
2371 if (mask) {
2372 flush = true;
2373 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2374 offset, mask);
2375 }
2376 }
2377 KVM_MMU_UNLOCK(kvm);
2378
2379 if (flush)
2380 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2381
2382 return 0;
2383 }
2384
2385 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2386 struct kvm_clear_dirty_log *log)
2387 {
2388 int r;
2389
2390 mutex_lock(&kvm->slots_lock);
2391
2392 r = kvm_clear_dirty_log_protect(kvm, log);
2393
2394 mutex_unlock(&kvm->slots_lock);
2395 return r;
2396 }
2397 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2398
2399 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2400 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2401 {
2402 if (!kvm || kvm_arch_has_private_mem(kvm))
2403 return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2404
2405 return 0;
2406 }
2407
2408 /*
2409 * Returns true if _all_ gfns in the range [@start, @end) have attributes
2410 * such that the bits in @mask match @attrs.
2411 */
2412 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2413 unsigned long mask, unsigned long attrs)
2414 {
2415 XA_STATE(xas, &kvm->mem_attr_array, start);
2416 unsigned long index;
2417 void *entry;
2418
2419 mask &= kvm_supported_mem_attributes(kvm);
2420 if (attrs & ~mask)
2421 return false;
2422
2423 if (end == start + 1)
2424 return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
2425
2426 guard(rcu)();
2427 if (!attrs)
2428 return !xas_find(&xas, end - 1);
2429
2430 for (index = start; index < end; index++) {
2431 do {
2432 entry = xas_next(&xas);
2433 } while (xas_retry(&xas, entry));
2434
2435 if (xas.xa_index != index ||
2436 (xa_to_value(entry) & mask) != attrs)
2437 return false;
2438 }
2439
2440 return true;
2441 }
2442
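/*
 * Invoke range->handler on every memslot that intersects the gfn range, in
 * every address space.  mmu_lock is taken lazily, only once a memslot is
 * found, and range->on_lock runs under it; TLBs are flushed at the end if
 * the range asks for it and a handler returned true.
 */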
2443 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2444 struct kvm_mmu_notifier_range *range)
2445 {
2446 struct kvm_gfn_range gfn_range;
2447 struct kvm_memory_slot *slot;
2448 struct kvm_memslots *slots;
2449 struct kvm_memslot_iter iter;
2450 bool found_memslot = false;
2451 bool ret = false;
2452 int i;
2453
2454 gfn_range.arg = range->arg;
2455 gfn_range.may_block = range->may_block;
2456
2457 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2458 slots = __kvm_memslots(kvm, i);
2459
2460 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2461 slot = iter.slot;
2462 gfn_range.slot = slot;
2463
2464 gfn_range.start = max(range->start, slot->base_gfn);
2465 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2466 if (gfn_range.start >= gfn_range.end)
2467 continue;
2468
2469 if (!found_memslot) {
2470 found_memslot = true;
2471 KVM_MMU_LOCK(kvm);
2472 if (!IS_KVM_NULL_FN(range->on_lock))
2473 range->on_lock(kvm);
2474 }
2475
2476 ret |= range->handler(kvm, &gfn_range);
2477 }
2478 }
2479
2480 if (range->flush_on_ret && ret)
2481 kvm_flush_remote_tlbs(kvm);
2482
2483 if (found_memslot)
2484 KVM_MMU_UNLOCK(kvm);
2485 }
2486
2487 static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2488 struct kvm_gfn_range *range)
2489 {
2490 /*
2491 * Unconditionally add the range to the invalidation set, regardless of
2492 * whether or not the arch callback actually needs to zap SPTEs. E.g.
2493 * if KVM supports RWX attributes in the future and the attributes are
2494 * going from R=>RW, zapping isn't strictly necessary. Unconditionally
2495 * adding the range allows KVM to require that MMU invalidations add at
2496 * least one range between begin() and end(), e.g. allows KVM to detect
2497 * bugs where the add() is missed. Relaxing the rule *might* be safe,
2498 * but it's not obvious that allowing new mappings while the attributes
2499 * are in flux is desirable or worth the complexity.
2500 */
2501 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2502
2503 return kvm_arch_pre_set_memory_attributes(kvm, range);
2504 }
2505
2506 /* Set @attributes for the gfn range [@start, @end). */
2507 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2508 unsigned long attributes)
2509 {
2510 struct kvm_mmu_notifier_range pre_set_range = {
2511 .start = start,
2512 .end = end,
2513 .handler = kvm_pre_set_memory_attributes,
2514 .on_lock = kvm_mmu_invalidate_begin,
2515 .flush_on_ret = true,
2516 .may_block = true,
2517 };
2518 struct kvm_mmu_notifier_range post_set_range = {
2519 .start = start,
2520 .end = end,
2521 .arg.attributes = attributes,
2522 .handler = kvm_arch_post_set_memory_attributes,
2523 .on_lock = kvm_mmu_invalidate_end,
2524 .may_block = true,
2525 };
2526 unsigned long i;
2527 void *entry;
2528 int r = 0;
2529
2530 entry = attributes ? xa_mk_value(attributes) : NULL;
2531
2532 mutex_lock(&kvm->slots_lock);
2533
2534 /* Nothing to do if the entire range already has the desired attributes. */
2535 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
2536 goto out_unlock;
2537
2538 /*
2539 * Reserve memory ahead of time to avoid having to deal with failures
2540 * partway through setting the new attributes.
2541 */
2542 for (i = start; i < end; i++) {
2543 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2544 if (r)
2545 goto out_unlock;
2546 }
2547
2548 kvm_handle_gfn_range(kvm, &pre_set_range);
2549
2550 for (i = start; i < end; i++) {
2551 r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2552 GFP_KERNEL_ACCOUNT));
2553 KVM_BUG_ON(r, kvm);
2554 }
2555
2556 kvm_handle_gfn_range(kvm, &post_set_range);
2557
2558 out_unlock:
2559 mutex_unlock(&kvm->slots_lock);
2560
2561 return r;
2562 }
2563 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2564 struct kvm_memory_attributes *attrs)
2565 {
2566 gfn_t start, end;
2567
2568 /* flags is currently not used. */
2569 if (attrs->flags)
2570 return -EINVAL;
2571 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2572 return -EINVAL;
2573 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2574 return -EINVAL;
2575 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2576 return -EINVAL;
2577
2578 start = attrs->address >> PAGE_SHIFT;
2579 end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2580
2581 /*
2582 * xarray tracks data using "unsigned long", and as a result so does
2583 * KVM. For simplicity, KVM supports generic attributes only on 64-bit
2584 * architectures.
2585 */
2586 BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2587
2588 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2589 }
2590 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2591
2592 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2593 {
2594 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2595 }
2596 EXPORT_SYMBOL_GPL(gfn_to_memslot);
2597
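/*
 * Like gfn_to_memslot(), but uses the vCPU's last-used-slot cache, which is
 * dropped whenever the memslot generation changes (and thus also when
 * switching address spaces).
 */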
2598 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2599 {
2600 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2601 u64 gen = slots->generation;
2602 struct kvm_memory_slot *slot;
2603
2604 /*
2605 * This also protects against using a memslot from a different address space,
2606 * since different address spaces have different generation numbers.
2607 */
2608 if (unlikely(gen != vcpu->last_used_slot_gen)) {
2609 vcpu->last_used_slot = NULL;
2610 vcpu->last_used_slot_gen = gen;
2611 }
2612
2613 slot = try_get_memslot(vcpu->last_used_slot, gfn);
2614 if (slot)
2615 return slot;
2616
2617 /*
2618 * Fall back to searching all memslots. We purposely use
2619 * search_memslots() instead of __gfn_to_memslot() to avoid
2620 * thrashing the VM-wide last_used_slot in kvm_memslots.
2621 */
2622 slot = search_memslots(slots, gfn, false);
2623 if (slot) {
2624 vcpu->last_used_slot = slot;
2625 return slot;
2626 }
2627
2628 return NULL;
2629 }
2630
2631 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2632 {
2633 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2634
2635 return kvm_is_visible_memslot(memslot);
2636 }
2637 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2638
2639 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2640 {
2641 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2642
2643 return kvm_is_visible_memslot(memslot);
2644 }
2645 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2646
2647 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2648 {
2649 struct vm_area_struct *vma;
2650 unsigned long addr, size;
2651
2652 size = PAGE_SIZE;
2653
2654 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2655 if (kvm_is_error_hva(addr))
2656 return PAGE_SIZE;
2657
2658 mmap_read_lock(current->mm);
2659 vma = find_vma(current->mm, addr);
2660 if (!vma)
2661 goto out;
2662
2663 size = vma_kernel_pagesize(vma);
2664
2665 out:
2666 mmap_read_unlock(current->mm);
2667
2668 return size;
2669 }
2670
2671 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2672 {
2673 return slot->flags & KVM_MEM_READONLY;
2674 }
2675
2676 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2677 gfn_t *nr_pages, bool write)
2678 {
2679 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2680 return KVM_HVA_ERR_BAD;
2681
2682 if (memslot_is_readonly(slot) && write)
2683 return KVM_HVA_ERR_RO_BAD;
2684
2685 if (nr_pages)
2686 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2687
2688 return __gfn_to_hva_memslot(slot, gfn);
2689 }
2690
2691 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2692 gfn_t *nr_pages)
2693 {
2694 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2695 }
2696
2697 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2698 gfn_t gfn)
2699 {
2700 return gfn_to_hva_many(slot, gfn, NULL);
2701 }
2702 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2703
2704 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2705 {
2706 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2707 }
2708 EXPORT_SYMBOL_GPL(gfn_to_hva);
2709
2710 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2711 {
2712 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2713 }
2714 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2715
2716 /*
2717 * Return the hva of a @gfn and the R/W attribute if possible.
2718 *
2719 * @slot: the kvm_memory_slot which contains @gfn
2720 * @gfn: the gfn to be translated
2721 * @writable: used to return the read/write attribute of the @slot if the hva
2722 * is valid and @writable is not NULL
2723 */
2724 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2725 gfn_t gfn, bool *writable)
2726 {
2727 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2728
2729 if (!kvm_is_error_hva(hva) && writable)
2730 *writable = !memslot_is_readonly(slot);
2731
2732 return hva;
2733 }
2734
2735 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2736 {
2737 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2738
2739 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2740 }
2741
2742 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2743 {
2744 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2745
2746 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2747 }
2748
2749 static inline int check_user_page_hwpoison(unsigned long addr)
2750 {
2751 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2752
2753 rc = get_user_pages(addr, 1, flags, NULL);
2754 return rc == -EHWPOISON;
2755 }
2756
2757 /*
2758 * The fast path to get the writable pfn which will be stored in @pfn,
2759 * true indicates success, otherwise false is returned. It's also the
2760 * only part that can run in atomic context.
2761 */
2762 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2763 bool *writable, kvm_pfn_t *pfn)
2764 {
2765 struct page *page[1];
2766
2767 /*
2768 * Fast pin a writable pfn only if it is a write fault request
2769 * or the caller allows mapping a writable pfn for a read fault
2770 * request.
2771 */
2772 if (!(write_fault || writable))
2773 return false;
2774
2775 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2776 *pfn = page_to_pfn(page[0]);
2777
2778 if (writable)
2779 *writable = true;
2780 return true;
2781 }
2782
2783 return false;
2784 }
2785
2786 /*
2787 * The slow path to get the pfn of the specified host virtual address,
2788 * where 1 indicates success and -errno is returned if an error is detected.
2789 */
2790 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2791 bool interruptible, bool *writable, kvm_pfn_t *pfn)
2792 {
2793 /*
2794 * When a VCPU accesses a page that is not mapped into the secondary
2795 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2796 * make progress. We always want to honor NUMA hinting faults in that
2797 * case, because GUP usage corresponds to memory accesses from the VCPU.
2798 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2799 * mapped into the secondary MMU and gets accessed by a VCPU.
2800 *
2801 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2802 * implicitly honor NUMA hinting faults and don't need this flag.
2803 */
2804 unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2805 struct page *page;
2806 int npages;
2807
2808 might_sleep();
2809
2810 if (writable)
2811 *writable = write_fault;
2812
2813 if (write_fault)
2814 flags |= FOLL_WRITE;
2815 if (async)
2816 flags |= FOLL_NOWAIT;
2817 if (interruptible)
2818 flags |= FOLL_INTERRUPTIBLE;
2819
2820 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2821 if (npages != 1)
2822 return npages;
2823
2824 /* map read fault as writable if possible */
2825 if (unlikely(!write_fault) && writable) {
2826 struct page *wpage;
2827
2828 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2829 *writable = true;
2830 put_page(page);
2831 page = wpage;
2832 }
2833 }
2834 *pfn = page_to_pfn(page);
2835 return npages;
2836 }
2837
2838 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2839 {
2840 if (unlikely(!(vma->vm_flags & VM_READ)))
2841 return false;
2842
2843 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2844 return false;
2845
2846 return true;
2847 }
2848
2849 static int kvm_try_get_pfn(kvm_pfn_t pfn)
2850 {
2851 struct page *page = kvm_pfn_to_refcounted_page(pfn);
2852
2853 if (!page)
2854 return 1;
2855
2856 return get_page_unless_zero(page);
2857 }
2858
2859 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2860 unsigned long addr, bool write_fault,
2861 bool *writable, kvm_pfn_t *p_pfn)
2862 {
2863 kvm_pfn_t pfn;
2864 pte_t *ptep;
2865 pte_t pte;
2866 spinlock_t *ptl;
2867 int r;
2868
2869 r = follow_pte(vma, addr, &ptep, &ptl);
2870 if (r) {
2871 /*
2872 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2873 * not call the fault handler, so do it here.
2874 */
2875 bool unlocked = false;
2876 r = fixup_user_fault(current->mm, addr,
2877 (write_fault ? FAULT_FLAG_WRITE : 0),
2878 &unlocked);
2879 if (unlocked)
2880 return -EAGAIN;
2881 if (r)
2882 return r;
2883
2884 r = follow_pte(vma, addr, &ptep, &ptl);
2885 if (r)
2886 return r;
2887 }
2888
2889 pte = ptep_get(ptep);
2890
2891 if (write_fault && !pte_write(pte)) {
2892 pfn = KVM_PFN_ERR_RO_FAULT;
2893 goto out;
2894 }
2895
2896 if (writable)
2897 *writable = pte_write(pte);
2898 pfn = pte_pfn(pte);
2899
2900 /*
2901 * Get a reference here because callers of *hva_to_pfn* and
2902 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2903 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
2904 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2905 * simply do nothing for reserved pfns.
2906 *
2907 * Whoever called remap_pfn_range is also going to call e.g.
2908 * unmap_mapping_range before the underlying pages are freed,
2909 * causing a call to our MMU notifier.
2910 *
2911 * Certain IO or PFNMAP mappings can be backed with valid
2912 * struct pages, but be allocated without refcounting e.g.,
2913 * tail pages of non-compound higher order allocations, which
2914 * would then underflow the refcount when the caller does the
2915 * required put_page. Don't allow those pages here.
2916 */
2917 if (!kvm_try_get_pfn(pfn))
2918 r = -EFAULT;
2919
2920 out:
2921 pte_unmap_unlock(ptep, ptl);
2922 *p_pfn = pfn;
2923
2924 return r;
2925 }
2926
2927 /*
2928 * Pin guest page in memory and return its pfn.
2929 * @addr: host virtual address which maps memory to the guest
2930 * @atomic: whether this function is forbidden from sleeping
2931 * @interruptible: whether the process can be interrupted by non-fatal signals
2932 * @async: whether this function needs to wait for IO to complete if the
2933 * host page is not in memory
2934 * @write_fault: whether we should get a writable host page
2935 * @writable: whether to allow mapping a writable host page for !@write_fault
2936 *
2937 * The function will map a writable host page for these two cases:
2938 * 1): @write_fault = true
2939 * 2): @write_fault = false && @writable, @writable will tell the caller
2940 * whether the mapping is writable.
2941 */
2942 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2943 bool *async, bool write_fault, bool *writable)
2944 {
2945 struct vm_area_struct *vma;
2946 kvm_pfn_t pfn;
2947 int npages, r;
2948
2949 /* we can do it either atomically or asynchronously, not both */
2950 BUG_ON(atomic && async);
2951
2952 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2953 return pfn;
2954
2955 if (atomic)
2956 return KVM_PFN_ERR_FAULT;
2957
2958 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2959 writable, &pfn);
2960 if (npages == 1)
2961 return pfn;
2962 if (npages == -EINTR)
2963 return KVM_PFN_ERR_SIGPENDING;
2964
2965 mmap_read_lock(current->mm);
2966 if (npages == -EHWPOISON ||
2967 (!async && check_user_page_hwpoison(addr))) {
2968 pfn = KVM_PFN_ERR_HWPOISON;
2969 goto exit;
2970 }
2971
2972 retry:
2973 vma = vma_lookup(current->mm, addr);
2974
2975 if (vma == NULL)
2976 pfn = KVM_PFN_ERR_FAULT;
2977 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2978 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2979 if (r == -EAGAIN)
2980 goto retry;
2981 if (r < 0)
2982 pfn = KVM_PFN_ERR_FAULT;
2983 } else {
2984 if (async && vma_is_valid(vma, write_fault))
2985 *async = true;
2986 pfn = KVM_PFN_ERR_FAULT;
2987 }
2988 exit:
2989 mmap_read_unlock(current->mm);
2990 return pfn;
2991 }
2992
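/*
 * Translate @gfn in @slot to a host pfn: resolve the hva (refusing writes to
 * read-only memslots) and pin the backing page via hva_to_pfn().
 */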
2993 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2994 bool atomic, bool interruptible, bool *async,
2995 bool write_fault, bool *writable, hva_t *hva)
2996 {
2997 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2998
2999 if (hva)
3000 *hva = addr;
3001
3002 if (kvm_is_error_hva(addr)) {
3003 if (writable)
3004 *writable = false;
3005
3006 return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
3007 KVM_PFN_NOSLOT;
3008 }
3009
3010 /* Do not map writable pfn in the readonly memslot. */
3011 if (writable && memslot_is_readonly(slot)) {
3012 *writable = false;
3013 writable = NULL;
3014 }
3015
3016 return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
3017 writable);
3018 }
3019 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
3020
3021 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
3022 bool *writable)
3023 {
3024 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
3025 NULL, write_fault, writable, NULL);
3026 }
3027 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
3028
3029 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
3030 {
3031 return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
3032 NULL, NULL);
3033 }
3034 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
3035
3036 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
3037 {
3038 return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
3039 NULL, NULL);
3040 }
3041 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
3042
3043 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
3044 {
3045 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3046 }
3047 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
3048
3049 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
3050 {
3051 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
3052 }
3053 EXPORT_SYMBOL_GPL(gfn_to_pfn);
3054
3055 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
3056 {
3057 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3058 }
3059 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
3060
3061 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3062 struct page **pages, int nr_pages)
3063 {
3064 unsigned long addr;
3065 gfn_t entry = 0;
3066
3067 addr = gfn_to_hva_many(slot, gfn, &entry);
3068 if (kvm_is_error_hva(addr))
3069 return -1;
3070
3071 if (entry < nr_pages)
3072 return 0;
3073
3074 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3075 }
3076 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
3077
3078 /*
3079 * Do not use this helper unless you are absolutely certain the gfn _must_ be
3080 * backed by 'struct page'. A valid example is if the backing memslot is
3081 * controlled by KVM. Note, if the returned page is valid, its refcount has
3082 * been elevated by gfn_to_pfn().
3083 */
3084 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
3085 {
3086 struct page *page;
3087 kvm_pfn_t pfn;
3088
3089 pfn = gfn_to_pfn(kvm, gfn);
3090
3091 if (is_error_noslot_pfn(pfn))
3092 return KVM_ERR_PTR_BAD_PAGE;
3093
3094 page = kvm_pfn_to_refcounted_page(pfn);
3095 if (!page)
3096 return KVM_ERR_PTR_BAD_PAGE;
3097
3098 return page;
3099 }
3100 EXPORT_SYMBOL_GPL(gfn_to_page);
3101
3102 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
3103 {
3104 if (dirty)
3105 kvm_release_pfn_dirty(pfn);
3106 else
3107 kvm_release_pfn_clean(pfn);
3108 }
3109
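/*
 * Map a guest page into the kernel's address space, via kmap() when the pfn
 * has a struct page and, if CONFIG_HAS_IOMEM, via memremap() otherwise.
 */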
3110 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
3111 {
3112 kvm_pfn_t pfn;
3113 void *hva = NULL;
3114 struct page *page = KVM_UNMAPPED_PAGE;
3115
3116 if (!map)
3117 return -EINVAL;
3118
3119 pfn = gfn_to_pfn(vcpu->kvm, gfn);
3120 if (is_error_noslot_pfn(pfn))
3121 return -EINVAL;
3122
3123 if (pfn_valid(pfn)) {
3124 page = pfn_to_page(pfn);
3125 hva = kmap(page);
3126 #ifdef CONFIG_HAS_IOMEM
3127 } else {
3128 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
3129 #endif
3130 }
3131
3132 if (!hva)
3133 return -EFAULT;
3134
3135 map->page = page;
3136 map->hva = hva;
3137 map->pfn = pfn;
3138 map->gfn = gfn;
3139
3140 return 0;
3141 }
3142 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
3143
3144 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
3145 {
3146 if (!map)
3147 return;
3148
3149 if (!map->hva)
3150 return;
3151
3152 if (map->page != KVM_UNMAPPED_PAGE)
3153 kunmap(map->page);
3154 #ifdef CONFIG_HAS_IOMEM
3155 else
3156 memunmap(map->hva);
3157 #endif
3158
3159 if (dirty)
3160 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3161
3162 kvm_release_pfn(map->pfn, dirty);
3163
3164 map->hva = NULL;
3165 map->page = NULL;
3166 }
3167 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
3168
3169 static bool kvm_is_ad_tracked_page(struct page *page)
3170 {
3171 /*
3172 * Per page-flags.h, pages tagged PG_reserved "should in general not be
3173 * touched (e.g. set dirty) except by its owner".
3174 */
3175 return !PageReserved(page);
3176 }
3177
3178 static void kvm_set_page_dirty(struct page *page)
3179 {
3180 if (kvm_is_ad_tracked_page(page))
3181 SetPageDirty(page);
3182 }
3183
3184 static void kvm_set_page_accessed(struct page *page)
3185 {
3186 if (kvm_is_ad_tracked_page(page))
3187 mark_page_accessed(page);
3188 }
3189
3190 void kvm_release_page_clean(struct page *page)
3191 {
3192 WARN_ON(is_error_page(page));
3193
3194 kvm_set_page_accessed(page);
3195 put_page(page);
3196 }
3197 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
3198
3199 void kvm_release_pfn_clean(kvm_pfn_t pfn)
3200 {
3201 struct page *page;
3202
3203 if (is_error_noslot_pfn(pfn))
3204 return;
3205
3206 page = kvm_pfn_to_refcounted_page(pfn);
3207 if (!page)
3208 return;
3209
3210 kvm_release_page_clean(page);
3211 }
3212 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
3213
3214 void kvm_release_page_dirty(struct page *page)
3215 {
3216 WARN_ON(is_error_page(page));
3217
3218 kvm_set_page_dirty(page);
3219 kvm_release_page_clean(page);
3220 }
3221 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
3222
3223 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
3224 {
3225 struct page *page;
3226
3227 if (is_error_noslot_pfn(pfn))
3228 return;
3229
3230 page = kvm_pfn_to_refcounted_page(pfn);
3231 if (!page)
3232 return;
3233
3234 kvm_release_page_dirty(page);
3235 }
3236 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
3237
3238 /*
3239 * Note, checking for an error/noslot pfn is the caller's responsibility when
3240 * directly marking a page dirty/accessed. Unlike the "release" helpers, the
3241 * "set" helpers are not to be used when the pfn might point at garbage.
3242 */
3243 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
3244 {
3245 if (WARN_ON(is_error_noslot_pfn(pfn)))
3246 return;
3247
3248 if (pfn_valid(pfn))
3249 kvm_set_page_dirty(pfn_to_page(pfn));
3250 }
3251 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
3252
3253 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
3254 {
3255 if (WARN_ON(is_error_noslot_pfn(pfn)))
3256 return;
3257
3258 if (pfn_valid(pfn))
3259 kvm_set_page_accessed(pfn_to_page(pfn));
3260 }
3261 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
3262
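/*
 * Return the number of bytes that can be copied without crossing a page
 * boundary, i.e. min(len, PAGE_SIZE - offset).
 */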
3263 static int next_segment(unsigned long len, int offset)
3264 {
3265 if (len > PAGE_SIZE - offset)
3266 return PAGE_SIZE - offset;
3267 else
3268 return len;
3269 }
3270
3271 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3272 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3273 void *data, int offset, int len)
3274 {
3275 int r;
3276 unsigned long addr;
3277
3278 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3279 if (kvm_is_error_hva(addr))
3280 return -EFAULT;
3281 r = __copy_from_user(data, (void __user *)addr + offset, len);
3282 if (r)
3283 return -EFAULT;
3284 return 0;
3285 }
3286
3287 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3288 int len)
3289 {
3290 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3291
3292 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3293 }
3294 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
3295
3296 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3297 int offset, int len)
3298 {
3299 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3300
3301 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3302 }
3303 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
3304
3305 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3306 {
3307 gfn_t gfn = gpa >> PAGE_SHIFT;
3308 int seg;
3309 int offset = offset_in_page(gpa);
3310 int ret;
3311
3312 while ((seg = next_segment(len, offset)) != 0) {
3313 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3314 if (ret < 0)
3315 return ret;
3316 offset = 0;
3317 len -= seg;
3318 data += seg;
3319 ++gfn;
3320 }
3321 return 0;
3322 }
3323 EXPORT_SYMBOL_GPL(kvm_read_guest);
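/*
 * Illustrative sketch, not part of the original source: a typical caller
 * reads a guest-physical object in one call and treats any non-zero return
 * as -EFAULT, e.g.:
 *
 *	u64 val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */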
3324
3325 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3326 {
3327 gfn_t gfn = gpa >> PAGE_SHIFT;
3328 int seg;
3329 int offset = offset_in_page(gpa);
3330 int ret;
3331
3332 while ((seg = next_segment(len, offset)) != 0) {
3333 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3334 if (ret < 0)
3335 return ret;
3336 offset = 0;
3337 len -= seg;
3338 data += seg;
3339 ++gfn;
3340 }
3341 return 0;
3342 }
3343 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3344
3345 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3346 void *data, int offset, unsigned long len)
3347 {
3348 int r;
3349 unsigned long addr;
3350
3351 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3352 if (kvm_is_error_hva(addr))
3353 return -EFAULT;
3354 pagefault_disable();
3355 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3356 pagefault_enable();
3357 if (r)
3358 return -EFAULT;
3359 return 0;
3360 }
3361
3362 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3363 void *data, unsigned long len)
3364 {
3365 gfn_t gfn = gpa >> PAGE_SHIFT;
3366 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3367 int offset = offset_in_page(gpa);
3368
3369 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3370 }
3371 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3372
3373 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3374 static int __kvm_write_guest_page(struct kvm *kvm,
3375 struct kvm_memory_slot *memslot, gfn_t gfn,
3376 const void *data, int offset, int len)
3377 {
3378 int r;
3379 unsigned long addr;
3380
3381 addr = gfn_to_hva_memslot(memslot, gfn);
3382 if (kvm_is_error_hva(addr))
3383 return -EFAULT;
3384 r = __copy_to_user((void __user *)addr + offset, data, len);
3385 if (r)
3386 return -EFAULT;
3387 mark_page_dirty_in_slot(kvm, memslot, gfn);
3388 return 0;
3389 }
3390
3391 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3392 const void *data, int offset, int len)
3393 {
3394 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3395
3396 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3397 }
3398 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3399
3400 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3401 const void *data, int offset, int len)
3402 {
3403 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3404
3405 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3406 }
3407 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3408
3409 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3410 unsigned long len)
3411 {
3412 gfn_t gfn = gpa >> PAGE_SHIFT;
3413 int seg;
3414 int offset = offset_in_page(gpa);
3415 int ret;
3416
3417 while ((seg = next_segment(len, offset)) != 0) {
3418 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3419 if (ret < 0)
3420 return ret;
3421 offset = 0;
3422 len -= seg;
3423 data += seg;
3424 ++gfn;
3425 }
3426 return 0;
3427 }
3428 EXPORT_SYMBOL_GPL(kvm_write_guest);
3429
3430 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3431 unsigned long len)
3432 {
3433 gfn_t gfn = gpa >> PAGE_SHIFT;
3434 int seg;
3435 int offset = offset_in_page(gpa);
3436 int ret;
3437
3438 while ((seg = next_segment(len, offset)) != 0) {
3439 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3440 if (ret < 0)
3441 return ret;
3442 offset = 0;
3443 len -= seg;
3444 data += seg;
3445 ++gfn;
3446 }
3447 return 0;
3448 }
3449 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3450
3451 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3452 struct gfn_to_hva_cache *ghc,
3453 gpa_t gpa, unsigned long len)
3454 {
3455 int offset = offset_in_page(gpa);
3456 gfn_t start_gfn = gpa >> PAGE_SHIFT;
3457 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3458 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3459 gfn_t nr_pages_avail;
3460
3461 /* Update ghc->generation before performing any error checks. */
3462 ghc->generation = slots->generation;
3463
3464 if (start_gfn > end_gfn) {
3465 ghc->hva = KVM_HVA_ERR_BAD;
3466 return -EINVAL;
3467 }
3468
3469 /*
3470 * If the requested region crosses two memslots, we still
3471 * verify that the entire region is valid here.
3472 */
3473 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3474 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3475 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3476 &nr_pages_avail);
3477 if (kvm_is_error_hva(ghc->hva))
3478 return -EFAULT;
3479 }
3480
3481 /* Use the slow path for cross page reads and writes. */
3482 if (nr_pages_needed == 1)
3483 ghc->hva += offset;
3484 else
3485 ghc->memslot = NULL;
3486
3487 ghc->gpa = gpa;
3488 ghc->len = len;
3489 return 0;
3490 }
3491
3492 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3493 gpa_t gpa, unsigned long len)
3494 {
3495 struct kvm_memslots *slots = kvm_memslots(kvm);
3496 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3497 }
3498 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
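
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that repeatedly accesses the same guest structure typically initializes
 * the cache once and then uses the *_cached() helpers below, which fall
 * back to kvm_read_guest()/kvm_write_guest() when the cached region spans
 * more than one page or the memslot generation has changed:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */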
3499
3500 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3501 void *data, unsigned int offset,
3502 unsigned long len)
3503 {
3504 struct kvm_memslots *slots = kvm_memslots(kvm);
3505 int r;
3506 gpa_t gpa = ghc->gpa + offset;
3507
3508 if (WARN_ON_ONCE(len + offset > ghc->len))
3509 return -EINVAL;
3510
3511 if (slots->generation != ghc->generation) {
3512 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3513 return -EFAULT;
3514 }
3515
3516 if (kvm_is_error_hva(ghc->hva))
3517 return -EFAULT;
3518
3519 if (unlikely(!ghc->memslot))
3520 return kvm_write_guest(kvm, gpa, data, len);
3521
3522 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3523 if (r)
3524 return -EFAULT;
3525 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3526
3527 return 0;
3528 }
3529 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3530
3531 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3532 void *data, unsigned long len)
3533 {
3534 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3535 }
3536 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3537
3538 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3539 void *data, unsigned int offset,
3540 unsigned long len)
3541 {
3542 struct kvm_memslots *slots = kvm_memslots(kvm);
3543 int r;
3544 gpa_t gpa = ghc->gpa + offset;
3545
3546 if (WARN_ON_ONCE(len + offset > ghc->len))
3547 return -EINVAL;
3548
3549 if (slots->generation != ghc->generation) {
3550 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3551 return -EFAULT;
3552 }
3553
3554 if (kvm_is_error_hva(ghc->hva))
3555 return -EFAULT;
3556
3557 if (unlikely(!ghc->memslot))
3558 return kvm_read_guest(kvm, gpa, data, len);
3559
3560 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3561 if (r)
3562 return -EFAULT;
3563
3564 return 0;
3565 }
3566 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3567
3568 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3569 void *data, unsigned long len)
3570 {
3571 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3572 }
3573 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3574
3575 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3576 {
3577 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3578 gfn_t gfn = gpa >> PAGE_SHIFT;
3579 int seg;
3580 int offset = offset_in_page(gpa);
3581 int ret;
3582
3583 while ((seg = next_segment(len, offset)) != 0) {
3584 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3585 if (ret < 0)
3586 return ret;
3587 offset = 0;
3588 len -= seg;
3589 ++gfn;
3590 }
3591 return 0;
3592 }
3593 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3594
3595 void mark_page_dirty_in_slot(struct kvm *kvm,
3596 const struct kvm_memory_slot *memslot,
3597 gfn_t gfn)
3598 {
3599 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3600
3601 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3602 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3603 return;
3604
3605 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3606 #endif
3607
3608 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3609 unsigned long rel_gfn = gfn - memslot->base_gfn;
3610 u32 slot = (memslot->as_id << 16) | memslot->id;
3611
3612 if (kvm->dirty_ring_size && vcpu)
3613 kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3614 else if (memslot->dirty_bitmap)
3615 set_bit_le(rel_gfn, memslot->dirty_bitmap);
3616 }
3617 }
3618 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3619
3620 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3621 {
3622 struct kvm_memory_slot *memslot;
3623
3624 memslot = gfn_to_memslot(kvm, gfn);
3625 mark_page_dirty_in_slot(kvm, memslot, gfn);
3626 }
3627 EXPORT_SYMBOL_GPL(mark_page_dirty);
3628
3629 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3630 {
3631 struct kvm_memory_slot *memslot;
3632
3633 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3634 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3635 }
3636 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3637
3638 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3639 {
3640 if (!vcpu->sigset_active)
3641 return;
3642
3643 /*
3644 * This does a lockless modification of ->real_blocked, which is fine
3645 * because only current can change ->real_blocked and all readers of
3646 * ->real_blocked don't care as long as ->real_blocked is always a subset
3647 * of ->blocked.
3648 */
3649 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3650 }
3651
3652 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3653 {
3654 if (!vcpu->sigset_active)
3655 return;
3656
3657 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3658 sigemptyset(&current->real_blocked);
3659 }
3660
3661 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3662 {
3663 unsigned int old, val, grow, grow_start;
3664
3665 old = val = vcpu->halt_poll_ns;
3666 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3667 grow = READ_ONCE(halt_poll_ns_grow);
3668 if (!grow)
3669 goto out;
3670
3671 val *= grow;
3672 if (val < grow_start)
3673 val = grow_start;
3674
3675 vcpu->halt_poll_ns = val;
3676 out:
3677 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3678 }
3679
3680 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3681 {
3682 unsigned int old, val, shrink, grow_start;
3683
3684 old = val = vcpu->halt_poll_ns;
3685 shrink = READ_ONCE(halt_poll_ns_shrink);
3686 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3687 if (shrink == 0)
3688 val = 0;
3689 else
3690 val /= shrink;
3691
3692 if (val < grow_start)
3693 val = 0;
3694
3695 vcpu->halt_poll_ns = val;
3696 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3697 }
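
/*
 * Worked example (illustrative, assuming halt_poll_ns_grow = 2 and
 * halt_poll_ns_grow_start = 10000): a vCPU whose halt_poll_ns is 0 grows
 * to 10000 on the first grow (0 * 2 falls below grow_start and is bumped
 * to grow_start), then to 20000, 40000, ... on subsequent grows.  With
 * halt_poll_ns_shrink = 2, shrinking halves the value, and any result
 * that falls below grow_start is clamped to 0 so polling stops entirely.
 */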
3698
3699 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3700 {
3701 int ret = -EINTR;
3702 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3703
3704 if (kvm_arch_vcpu_runnable(vcpu))
3705 goto out;
3706 if (kvm_cpu_has_pending_timer(vcpu))
3707 goto out;
3708 if (signal_pending(current))
3709 goto out;
3710 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3711 goto out;
3712
3713 ret = 0;
3714 out:
3715 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3716 return ret;
3717 }
3718
3719 /*
3720 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3721 * pending. This is mostly used when halting a vCPU, but may also be used
3722 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3723 */
3724 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3725 {
3726 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3727 bool waited = false;
3728
3729 vcpu->stat.generic.blocking = 1;
3730
3731 preempt_disable();
3732 kvm_arch_vcpu_blocking(vcpu);
3733 prepare_to_rcuwait(wait);
3734 preempt_enable();
3735
3736 for (;;) {
3737 set_current_state(TASK_INTERRUPTIBLE);
3738
3739 if (kvm_vcpu_check_block(vcpu) < 0)
3740 break;
3741
3742 waited = true;
3743 schedule();
3744 }
3745
3746 preempt_disable();
3747 finish_rcuwait(wait);
3748 kvm_arch_vcpu_unblocking(vcpu);
3749 preempt_enable();
3750
3751 vcpu->stat.generic.blocking = 0;
3752
3753 return waited;
3754 }
3755
3756 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3757 ktime_t end, bool success)
3758 {
3759 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3760 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3761
3762 ++vcpu->stat.generic.halt_attempted_poll;
3763
3764 if (success) {
3765 ++vcpu->stat.generic.halt_successful_poll;
3766
3767 if (!vcpu_valid_wakeup(vcpu))
3768 ++vcpu->stat.generic.halt_poll_invalid;
3769
3770 stats->halt_poll_success_ns += poll_ns;
3771 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3772 } else {
3773 stats->halt_poll_fail_ns += poll_ns;
3774 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3775 }
3776 }
3777
3778 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3779 {
3780 struct kvm *kvm = vcpu->kvm;
3781
3782 if (kvm->override_halt_poll_ns) {
3783 /*
3784 * Ensure kvm->max_halt_poll_ns is not read before
3785 * kvm->override_halt_poll_ns.
3786 *
3787 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3788 */
3789 smp_rmb();
3790 return READ_ONCE(kvm->max_halt_poll_ns);
3791 }
3792
3793 return READ_ONCE(halt_poll_ns);
3794 }
3795
3796 /*
3797 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
3798 * polling is enabled, busy wait for a short time before blocking to avoid the
3799 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3800 * is halted.
3801 */
3802 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3803 {
3804 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3805 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3806 ktime_t start, cur, poll_end;
3807 bool waited = false;
3808 bool do_halt_poll;
3809 u64 halt_ns;
3810
3811 if (vcpu->halt_poll_ns > max_halt_poll_ns)
3812 vcpu->halt_poll_ns = max_halt_poll_ns;
3813
3814 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3815
3816 start = cur = poll_end = ktime_get();
3817 if (do_halt_poll) {
3818 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3819
3820 do {
3821 if (kvm_vcpu_check_block(vcpu) < 0)
3822 goto out;
3823 cpu_relax();
3824 poll_end = cur = ktime_get();
3825 } while (kvm_vcpu_can_poll(cur, stop));
3826 }
3827
3828 waited = kvm_vcpu_block(vcpu);
3829
3830 cur = ktime_get();
3831 if (waited) {
3832 vcpu->stat.generic.halt_wait_ns +=
3833 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3834 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3835 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3836 }
3837 out:
3838 /* The total time the vCPU was "halted", including polling time. */
3839 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3840
3841 /*
3842 * Note, halt-polling is considered successful so long as the vCPU was
3843 * never actually scheduled out, i.e. even if the wake event arrived
3844 * after the halt-polling loop itself, but before the full wait.
3845 */
3846 if (do_halt_poll)
3847 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3848
3849 if (halt_poll_allowed) {
3850 /* Recompute the max halt poll time in case it changed. */
3851 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3852
3853 if (!vcpu_valid_wakeup(vcpu)) {
3854 shrink_halt_poll_ns(vcpu);
3855 } else if (max_halt_poll_ns) {
3856 if (halt_ns <= vcpu->halt_poll_ns)
3857 ;
3858 /* we had a long block, shrink polling */
3859 else if (vcpu->halt_poll_ns &&
3860 halt_ns > max_halt_poll_ns)
3861 shrink_halt_poll_ns(vcpu);
3862 /* we had a short halt and our poll time is too small */
3863 else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3864 halt_ns < max_halt_poll_ns)
3865 grow_halt_poll_ns(vcpu);
3866 } else {
3867 vcpu->halt_poll_ns = 0;
3868 }
3869 }
3870
3871 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3872 }
3873 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
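
/*
 * Illustrative example of the adjustment policy above (not from the
 * original source), assuming max_halt_poll_ns = 200000 (200us):
 *
 *  - halt_ns <= vcpu->halt_poll_ns: the poll window is left unchanged
 *    (the wake event arrived while polling or very shortly after).
 *  - vcpu->halt_poll_ns != 0 and halt_ns > 200us: the halt was long, so
 *    the poll window is shrunk.
 *  - vcpu->halt_poll_ns < 200us and halt_ns < 200us: the halt was short
 *    but polling was too short to catch it, so the poll window is grown.
 */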
3874
3875 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3876 {
3877 if (__kvm_vcpu_wake_up(vcpu)) {
3878 WRITE_ONCE(vcpu->ready, true);
3879 ++vcpu->stat.generic.halt_wakeup;
3880 return true;
3881 }
3882
3883 return false;
3884 }
3885 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3886
3887 #ifndef CONFIG_S390
3888 /*
3889 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3890 */
3891 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3892 {
3893 int me, cpu;
3894
3895 if (kvm_vcpu_wake_up(vcpu))
3896 return;
3897
3898 me = get_cpu();
3899 /*
3900 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3901 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should
3902 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3903 * within the vCPU thread itself.
3904 */
3905 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3906 if (vcpu->mode == IN_GUEST_MODE)
3907 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3908 goto out;
3909 }
3910
3911 /*
3912 * Note, the vCPU could get migrated to a different pCPU at any point
3913 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3914 * IPI to the previous pCPU. But, that's ok because the purpose of the
3915 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3916 * vCPU also requires it to leave IN_GUEST_MODE.
3917 */
3918 if (kvm_arch_vcpu_should_kick(vcpu)) {
3919 cpu = READ_ONCE(vcpu->cpu);
3920 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3921 smp_send_reschedule(cpu);
3922 }
3923 out:
3924 put_cpu();
3925 }
3926 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3927 #endif /* !CONFIG_S390 */
3928
3929 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3930 {
3931 struct pid *pid;
3932 struct task_struct *task = NULL;
3933 int ret = 0;
3934
3935 rcu_read_lock();
3936 pid = rcu_dereference(target->pid);
3937 if (pid)
3938 task = get_pid_task(pid, PIDTYPE_PID);
3939 rcu_read_unlock();
3940 if (!task)
3941 return ret;
3942 ret = yield_to(task, 1);
3943 put_task_struct(task);
3944
3945 return ret;
3946 }
3947 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3948
3949 /*
3950 * Helper that checks whether a VCPU is eligible for directed yield.
3951 * Most eligible candidate to yield is decided by following heuristics:
3952 *
3953 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3954 * (preempted lock holder), indicated by @in_spin_loop.
3955 * Set at the beginning and cleared at the end of interception/PLE handler.
3956 *
3957 * (b) VCPU which has done pl-exit/cpu relax intercepted but did not get a
3958 * chance last time (it has most likely become eligible now since we probably
3959 * yielded to the lock holder in the last iteration. This is done by toggling
3960 * @dy_eligible each time a VCPU is checked for eligibility.)
3961 *
3962 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3963 * to preempted lock-holder could result in wrong VCPU selection and CPU
3964 * burning. Giving priority for a potential lock-holder increases lock
3965 * progress.
3966 *
3967 * Since the algorithm is based on heuristics, accessing another VCPU's data
3968 * without locking does no harm. It may result in trying to yield to the same
3969 * VCPU, failing, and continuing with the next VCPU, and so on.
3970 */
3971 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3972 {
3973 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3974 bool eligible;
3975
3976 eligible = !vcpu->spin_loop.in_spin_loop ||
3977 vcpu->spin_loop.dy_eligible;
3978
3979 if (vcpu->spin_loop.in_spin_loop)
3980 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3981
3982 return eligible;
3983 #else
3984 return true;
3985 #endif
3986 }
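
/*
 * Illustrative example (not part of the original source): while a target
 * vCPU is inside its own PLE/cpu-relax handler (in_spin_loop set), its
 * eligibility is governed by dy_eligible, and every check toggles that
 * flag.  Successive directed-yield passes therefore see such a vCPU
 * alternate between eligible and ineligible, so a spinning vCPU is not
 * picked as the yield target every single time.  A vCPU that is not in a
 * spin loop is always considered eligible.
 */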
3987
3988 /*
3989 * Unlike kvm_arch_vcpu_runnable, this function is called outside
3990 * a vcpu_load/vcpu_put pair. However, for most architectures
3991 * kvm_arch_vcpu_runnable does not require vcpu_load.
3992 */
3993 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3994 {
3995 return kvm_arch_vcpu_runnable(vcpu);
3996 }
3997
3998 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3999 {
4000 if (kvm_arch_dy_runnable(vcpu))
4001 return true;
4002
4003 #ifdef CONFIG_KVM_ASYNC_PF
4004 if (!list_empty_careful(&vcpu->async_pf.done))
4005 return true;
4006 #endif
4007
4008 return false;
4009 }
4010
4011 /*
4012 * By default, simply query the target vCPU's current mode when checking if a
4013 * vCPU was preempted in kernel mode. All architectures except x86 (or more
4014 * specifically, except VMX) allow querying whether or not a vCPU is in kernel
4015 * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
4016 * directly for cross-vCPU checks is functionally correct and accurate.
4017 */
4018 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
4019 {
4020 return kvm_arch_vcpu_in_kernel(vcpu);
4021 }
4022
4023 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
4024 {
4025 return false;
4026 }
4027
4028 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4029 {
4030 struct kvm *kvm = me->kvm;
4031 struct kvm_vcpu *vcpu;
4032 int last_boosted_vcpu;
4033 unsigned long i;
4034 int yielded = 0;
4035 int try = 3;
4036 int pass;
4037
4038 last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
4039 kvm_vcpu_set_in_spin_loop(me, true);
4040 /*
4041 * We boost the priority of a VCPU that is runnable but not
4042 * currently running, because it got preempted by something
4043 * else and called schedule in __vcpu_run. Hopefully that
4044 * VCPU is holding the lock that we need and will release it.
4045 * We approximate round-robin by starting at the last boosted VCPU.
4046 */
4047 for (pass = 0; pass < 2 && !yielded && try; pass++) {
4048 kvm_for_each_vcpu(i, vcpu, kvm) {
4049 if (!pass && i <= last_boosted_vcpu) {
4050 i = last_boosted_vcpu;
4051 continue;
4052 } else if (pass && i > last_boosted_vcpu)
4053 break;
4054 if (!READ_ONCE(vcpu->ready))
4055 continue;
4056 if (vcpu == me)
4057 continue;
4058 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4059 continue;
4060
4061 /*
4062 * Treat the target vCPU as being in-kernel if it has a
4063 * pending interrupt, as the vCPU trying to yield may
4064 * be spinning waiting on IPI delivery, i.e. the target
4065 * vCPU is in-kernel for the purposes of directed yield.
4066 */
4067 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4068 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4069 !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4070 continue;
4071 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4072 continue;
4073
4074 yielded = kvm_vcpu_yield_to(vcpu);
4075 if (yielded > 0) {
4076 WRITE_ONCE(kvm->last_boosted_vcpu, i);
4077 break;
4078 } else if (yielded < 0) {
4079 try--;
4080 if (!try)
4081 break;
4082 }
4083 }
4084 }
4085 kvm_vcpu_set_in_spin_loop(me, false);
4086
4087 /* Ensure vcpu is not eligible during next spinloop */
4088 kvm_vcpu_set_dy_eligible(me, false);
4089 }
4090 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
4091
4092 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4093 {
4094 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4095 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4096 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4097 kvm->dirty_ring_size / PAGE_SIZE);
4098 #else
4099 return false;
4100 #endif
4101 }
4102
4103 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4104 {
4105 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4106 struct page *page;
4107
4108 if (vmf->pgoff == 0)
4109 page = virt_to_page(vcpu->run);
4110 #ifdef CONFIG_X86
4111 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4112 page = virt_to_page(vcpu->arch.pio_data);
4113 #endif
4114 #ifdef CONFIG_KVM_MMIO
4115 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4116 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4117 #endif
4118 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4119 page = kvm_dirty_ring_get_page(
4120 &vcpu->dirty_ring,
4121 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4122 else
4123 return kvm_arch_vcpu_fault(vcpu, vmf);
4124 get_page(page);
4125 vmf->page = page;
4126 return 0;
4127 }
4128
4129 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4130 .fault = kvm_vcpu_fault,
4131 };
4132
4133 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4134 {
4135 struct kvm_vcpu *vcpu = file->private_data;
4136 unsigned long pages = vma_pages(vma);
4137
4138 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4139 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4140 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4141 return -EINVAL;
4142
4143 vma->vm_ops = &kvm_vcpu_vm_ops;
4144 return 0;
4145 }
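
/*
 * Userspace view (illustrative sketch, not from the original source): the
 * vcpu fd returned by KVM_CREATE_VCPU is mmap()ed to reach struct kvm_run
 * at page offset 0, e.g.:
 *
 *	size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run  = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, 0);
 *
 * Dirty-ring pages, when enabled, must be mapped shared and non-executable,
 * which is what the check above enforces.
 */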
4146
4147 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4148 {
4149 struct kvm_vcpu *vcpu = filp->private_data;
4150
4151 kvm_put_kvm(vcpu->kvm);
4152 return 0;
4153 }
4154
4155 static struct file_operations kvm_vcpu_fops = {
4156 .release = kvm_vcpu_release,
4157 .unlocked_ioctl = kvm_vcpu_ioctl,
4158 .mmap = kvm_vcpu_mmap,
4159 .llseek = noop_llseek,
4160 KVM_COMPAT(kvm_vcpu_compat_ioctl),
4161 };
4162
4163 /*
4164 * Allocates an inode for the vcpu.
4165 */
4166 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4167 {
4168 char name[8 + 1 + ITOA_MAX_LEN + 1];
4169
4170 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4171 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4172 }
4173
4174 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4175 static int vcpu_get_pid(void *data, u64 *val)
4176 {
4177 struct kvm_vcpu *vcpu = data;
4178
4179 rcu_read_lock();
4180 *val = pid_nr(rcu_dereference(vcpu->pid));
4181 rcu_read_unlock();
4182 return 0;
4183 }
4184
4185 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4186
4187 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4188 {
4189 struct dentry *debugfs_dentry;
4190 char dir_name[ITOA_MAX_LEN * 2];
4191
4192 if (!debugfs_initialized())
4193 return;
4194
4195 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4196 debugfs_dentry = debugfs_create_dir(dir_name,
4197 vcpu->kvm->debugfs_dentry);
4198 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4199 &vcpu_get_pid_fops);
4200
4201 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4202 }
4203 #endif
4204
4205 /*
4206 * Creates some virtual cpus. Good luck creating more than one.
4207 */
4208 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
4209 {
4210 int r;
4211 struct kvm_vcpu *vcpu;
4212 struct page *page;
4213
4214 /*
4215 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
4216 * too-large values instead of silently truncating.
4217 *
4218 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
4219 * changing the storage type (at the very least, IDs should be tracked
4220 * as unsigned ints).
4221 */
4222 BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
4223 if (id >= KVM_MAX_VCPU_IDS)
4224 return -EINVAL;
4225
4226 mutex_lock(&kvm->lock);
4227 if (kvm->created_vcpus >= kvm->max_vcpus) {
4228 mutex_unlock(&kvm->lock);
4229 return -EINVAL;
4230 }
4231
4232 r = kvm_arch_vcpu_precreate(kvm, id);
4233 if (r) {
4234 mutex_unlock(&kvm->lock);
4235 return r;
4236 }
4237
4238 kvm->created_vcpus++;
4239 mutex_unlock(&kvm->lock);
4240
4241 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4242 if (!vcpu) {
4243 r = -ENOMEM;
4244 goto vcpu_decrement;
4245 }
4246
4247 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4248 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4249 if (!page) {
4250 r = -ENOMEM;
4251 goto vcpu_free;
4252 }
4253 vcpu->run = page_address(page);
4254
4255 kvm_vcpu_init(vcpu, kvm, id);
4256
4257 r = kvm_arch_vcpu_create(vcpu);
4258 if (r)
4259 goto vcpu_free_run_page;
4260
4261 if (kvm->dirty_ring_size) {
4262 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
4263 id, kvm->dirty_ring_size);
4264 if (r)
4265 goto arch_vcpu_destroy;
4266 }
4267
4268 mutex_lock(&kvm->lock);
4269
4270 #ifdef CONFIG_LOCKDEP
4271 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
4272 mutex_lock(&vcpu->mutex);
4273 mutex_unlock(&vcpu->mutex);
4274 #endif
4275
4276 if (kvm_get_vcpu_by_id(kvm, id)) {
4277 r = -EEXIST;
4278 goto unlock_vcpu_destroy;
4279 }
4280
4281 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4282 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4283 if (r)
4284 goto unlock_vcpu_destroy;
4285
4286 /* Now it's all set up, let userspace reach it */
4287 kvm_get_kvm(kvm);
4288 r = create_vcpu_fd(vcpu);
4289 if (r < 0)
4290 goto kvm_put_xa_release;
4291
4292 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4293 r = -EINVAL;
4294 goto kvm_put_xa_release;
4295 }
4296
4297 /*
4298 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
4299 * pointer before kvm->online_vcpus' incremented value.
4300 */
4301 smp_wmb();
4302 atomic_inc(&kvm->online_vcpus);
4303
4304 mutex_unlock(&kvm->lock);
4305 kvm_arch_vcpu_postcreate(vcpu);
4306 kvm_create_vcpu_debugfs(vcpu);
4307 return r;
4308
4309 kvm_put_xa_release:
4310 kvm_put_kvm_no_destroy(kvm);
4311 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4312 unlock_vcpu_destroy:
4313 mutex_unlock(&kvm->lock);
4314 kvm_dirty_ring_free(&vcpu->dirty_ring);
4315 arch_vcpu_destroy:
4316 kvm_arch_vcpu_destroy(vcpu);
4317 vcpu_free_run_page:
4318 free_page((unsigned long)vcpu->run);
4319 vcpu_free:
4320 kmem_cache_free(kvm_vcpu_cache, vcpu);
4321 vcpu_decrement:
4322 mutex_lock(&kvm->lock);
4323 kvm->created_vcpus--;
4324 mutex_unlock(&kvm->lock);
4325 return r;
4326 }
4327
4328 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4329 {
4330 if (sigset) {
4331 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4332 vcpu->sigset_active = 1;
4333 vcpu->sigset = *sigset;
4334 } else
4335 vcpu->sigset_active = 0;
4336 return 0;
4337 }
4338
4339 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4340 size_t size, loff_t *offset)
4341 {
4342 struct kvm_vcpu *vcpu = file->private_data;
4343
4344 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4345 &kvm_vcpu_stats_desc[0], &vcpu->stat,
4346 sizeof(vcpu->stat), user_buffer, size, offset);
4347 }
4348
4349 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4350 {
4351 struct kvm_vcpu *vcpu = file->private_data;
4352
4353 kvm_put_kvm(vcpu->kvm);
4354 return 0;
4355 }
4356
4357 static const struct file_operations kvm_vcpu_stats_fops = {
4358 .owner = THIS_MODULE,
4359 .read = kvm_vcpu_stats_read,
4360 .release = kvm_vcpu_stats_release,
4361 .llseek = noop_llseek,
4362 };
4363
4364 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4365 {
4366 int fd;
4367 struct file *file;
4368 char name[15 + ITOA_MAX_LEN + 1];
4369
4370 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4371
4372 fd = get_unused_fd_flags(O_CLOEXEC);
4373 if (fd < 0)
4374 return fd;
4375
4376 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4377 if (IS_ERR(file)) {
4378 put_unused_fd(fd);
4379 return PTR_ERR(file);
4380 }
4381
4382 kvm_get_kvm(vcpu->kvm);
4383
4384 file->f_mode |= FMODE_PREAD;
4385 fd_install(fd, file);
4386
4387 return fd;
4388 }
4389
4390 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4391 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4392 struct kvm_pre_fault_memory *range)
4393 {
4394 int idx;
4395 long r;
4396 u64 full_size;
4397
4398 if (range->flags)
4399 return -EINVAL;
4400
4401 if (!PAGE_ALIGNED(range->gpa) ||
4402 !PAGE_ALIGNED(range->size) ||
4403 range->gpa + range->size <= range->gpa)
4404 return -EINVAL;
4405
4406 vcpu_load(vcpu);
4407 idx = srcu_read_lock(&vcpu->kvm->srcu);
4408
4409 full_size = range->size;
4410 do {
4411 if (signal_pending(current)) {
4412 r = -EINTR;
4413 break;
4414 }
4415
4416 r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
4417 if (WARN_ON_ONCE(r == 0 || r == -EIO))
4418 break;
4419
4420 if (r < 0)
4421 break;
4422
4423 range->size -= r;
4424 range->gpa += r;
4425 cond_resched();
4426 } while (range->size);
4427
4428 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4429 vcpu_put(vcpu);
4430
4431 /* Return success if at least one page was mapped successfully. */
4432 return full_size == range->size ? r : 0;
4433 }
4434 #endif
4435
4436 static long kvm_vcpu_ioctl(struct file *filp,
4437 unsigned int ioctl, unsigned long arg)
4438 {
4439 struct kvm_vcpu *vcpu = filp->private_data;
4440 void __user *argp = (void __user *)arg;
4441 int r;
4442 struct kvm_fpu *fpu = NULL;
4443 struct kvm_sregs *kvm_sregs = NULL;
4444
4445 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4446 return -EIO;
4447
4448 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4449 return -EINVAL;
4450
4451 /*
4452 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4453 * execution; mutex_lock() would break them.
4454 */
4455 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4456 if (r != -ENOIOCTLCMD)
4457 return r;
4458
4459 if (mutex_lock_killable(&vcpu->mutex))
4460 return -EINTR;
4461 switch (ioctl) {
4462 case KVM_RUN: {
4463 struct pid *oldpid;
4464 r = -EINVAL;
4465 if (arg)
4466 goto out;
4467 oldpid = rcu_access_pointer(vcpu->pid);
4468 if (unlikely(oldpid != task_pid(current))) {
4469 /* The thread running this VCPU changed. */
4470 struct pid *newpid;
4471
4472 r = kvm_arch_vcpu_run_pid_change(vcpu);
4473 if (r)
4474 break;
4475
4476 newpid = get_task_pid(current, PIDTYPE_PID);
4477 rcu_assign_pointer(vcpu->pid, newpid);
4478 if (oldpid)
4479 synchronize_rcu();
4480 put_pid(oldpid);
4481 }
4482 vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
4483 r = kvm_arch_vcpu_ioctl_run(vcpu);
4484 vcpu->wants_to_run = false;
4485
4486 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4487 break;
4488 }
4489 case KVM_GET_REGS: {
4490 struct kvm_regs *kvm_regs;
4491
4492 r = -ENOMEM;
4493 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
4494 if (!kvm_regs)
4495 goto out;
4496 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4497 if (r)
4498 goto out_free1;
4499 r = -EFAULT;
4500 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4501 goto out_free1;
4502 r = 0;
4503 out_free1:
4504 kfree(kvm_regs);
4505 break;
4506 }
4507 case KVM_SET_REGS: {
4508 struct kvm_regs *kvm_regs;
4509
4510 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4511 if (IS_ERR(kvm_regs)) {
4512 r = PTR_ERR(kvm_regs);
4513 goto out;
4514 }
4515 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4516 kfree(kvm_regs);
4517 break;
4518 }
4519 case KVM_GET_SREGS: {
4520 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
4521 r = -ENOMEM;
4522 if (!kvm_sregs)
4523 goto out;
4524 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4525 if (r)
4526 goto out;
4527 r = -EFAULT;
4528 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4529 goto out;
4530 r = 0;
4531 break;
4532 }
4533 case KVM_SET_SREGS: {
4534 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4535 if (IS_ERR(kvm_sregs)) {
4536 r = PTR_ERR(kvm_sregs);
4537 kvm_sregs = NULL;
4538 goto out;
4539 }
4540 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4541 break;
4542 }
4543 case KVM_GET_MP_STATE: {
4544 struct kvm_mp_state mp_state;
4545
4546 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4547 if (r)
4548 goto out;
4549 r = -EFAULT;
4550 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4551 goto out;
4552 r = 0;
4553 break;
4554 }
4555 case KVM_SET_MP_STATE: {
4556 struct kvm_mp_state mp_state;
4557
4558 r = -EFAULT;
4559 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4560 goto out;
4561 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4562 break;
4563 }
4564 case KVM_TRANSLATE: {
4565 struct kvm_translation tr;
4566
4567 r = -EFAULT;
4568 if (copy_from_user(&tr, argp, sizeof(tr)))
4569 goto out;
4570 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4571 if (r)
4572 goto out;
4573 r = -EFAULT;
4574 if (copy_to_user(argp, &tr, sizeof(tr)))
4575 goto out;
4576 r = 0;
4577 break;
4578 }
4579 case KVM_SET_GUEST_DEBUG: {
4580 struct kvm_guest_debug dbg;
4581
4582 r = -EFAULT;
4583 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4584 goto out;
4585 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4586 break;
4587 }
4588 case KVM_SET_SIGNAL_MASK: {
4589 struct kvm_signal_mask __user *sigmask_arg = argp;
4590 struct kvm_signal_mask kvm_sigmask;
4591 sigset_t sigset, *p;
4592
4593 p = NULL;
4594 if (argp) {
4595 r = -EFAULT;
4596 if (copy_from_user(&kvm_sigmask, argp,
4597 sizeof(kvm_sigmask)))
4598 goto out;
4599 r = -EINVAL;
4600 if (kvm_sigmask.len != sizeof(sigset))
4601 goto out;
4602 r = -EFAULT;
4603 if (copy_from_user(&sigset, sigmask_arg->sigset,
4604 sizeof(sigset)))
4605 goto out;
4606 p = &sigset;
4607 }
4608 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4609 break;
4610 }
4611 case KVM_GET_FPU: {
4612 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
4613 r = -ENOMEM;
4614 if (!fpu)
4615 goto out;
4616 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4617 if (r)
4618 goto out;
4619 r = -EFAULT;
4620 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4621 goto out;
4622 r = 0;
4623 break;
4624 }
4625 case KVM_SET_FPU: {
4626 fpu = memdup_user(argp, sizeof(*fpu));
4627 if (IS_ERR(fpu)) {
4628 r = PTR_ERR(fpu);
4629 fpu = NULL;
4630 goto out;
4631 }
4632 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4633 break;
4634 }
4635 case KVM_GET_STATS_FD: {
4636 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4637 break;
4638 }
4639 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4640 case KVM_PRE_FAULT_MEMORY: {
4641 struct kvm_pre_fault_memory range;
4642
4643 r = -EFAULT;
4644 if (copy_from_user(&range, argp, sizeof(range)))
4645 break;
4646 r = kvm_vcpu_pre_fault_memory(vcpu, &range);
4647 /* Pass back leftover range. */
4648 if (copy_to_user(argp, &range, sizeof(range)))
4649 r = -EFAULT;
4650 break;
4651 }
4652 #endif
4653 default:
4654 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4655 }
4656 out:
4657 mutex_unlock(&vcpu->mutex);
4658 kfree(fpu);
4659 kfree(kvm_sregs);
4660 return r;
4661 }
4662
4663 #ifdef CONFIG_KVM_COMPAT
4664 static long kvm_vcpu_compat_ioctl(struct file *filp,
4665 unsigned int ioctl, unsigned long arg)
4666 {
4667 struct kvm_vcpu *vcpu = filp->private_data;
4668 void __user *argp = compat_ptr(arg);
4669 int r;
4670
4671 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4672 return -EIO;
4673
4674 switch (ioctl) {
4675 case KVM_SET_SIGNAL_MASK: {
4676 struct kvm_signal_mask __user *sigmask_arg = argp;
4677 struct kvm_signal_mask kvm_sigmask;
4678 sigset_t sigset;
4679
4680 if (argp) {
4681 r = -EFAULT;
4682 if (copy_from_user(&kvm_sigmask, argp,
4683 sizeof(kvm_sigmask)))
4684 goto out;
4685 r = -EINVAL;
4686 if (kvm_sigmask.len != sizeof(compat_sigset_t))
4687 goto out;
4688 r = -EFAULT;
4689 if (get_compat_sigset(&sigset,
4690 (compat_sigset_t __user *)sigmask_arg->sigset))
4691 goto out;
4692 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4693 } else
4694 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4695 break;
4696 }
4697 default:
4698 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4699 }
4700
4701 out:
4702 return r;
4703 }
4704 #endif
4705
4706 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4707 {
4708 struct kvm_device *dev = filp->private_data;
4709
4710 if (dev->ops->mmap)
4711 return dev->ops->mmap(dev, vma);
4712
4713 return -ENODEV;
4714 }
4715
4716 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4717 int (*accessor)(struct kvm_device *dev,
4718 struct kvm_device_attr *attr),
4719 unsigned long arg)
4720 {
4721 struct kvm_device_attr attr;
4722
4723 if (!accessor)
4724 return -EPERM;
4725
4726 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4727 return -EFAULT;
4728
4729 return accessor(dev, &attr);
4730 }
4731
4732 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4733 unsigned long arg)
4734 {
4735 struct kvm_device *dev = filp->private_data;
4736
4737 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4738 return -EIO;
4739
4740 switch (ioctl) {
4741 case KVM_SET_DEVICE_ATTR:
4742 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4743 case KVM_GET_DEVICE_ATTR:
4744 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4745 case KVM_HAS_DEVICE_ATTR:
4746 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4747 default:
4748 if (dev->ops->ioctl)
4749 return dev->ops->ioctl(dev, ioctl, arg);
4750
4751 return -ENOTTY;
4752 }
4753 }
4754
4755 static int kvm_device_release(struct inode *inode, struct file *filp)
4756 {
4757 struct kvm_device *dev = filp->private_data;
4758 struct kvm *kvm = dev->kvm;
4759
4760 if (dev->ops->release) {
4761 mutex_lock(&kvm->lock);
4762 list_del_rcu(&dev->vm_node);
4763 synchronize_rcu();
4764 dev->ops->release(dev);
4765 mutex_unlock(&kvm->lock);
4766 }
4767
4768 kvm_put_kvm(kvm);
4769 return 0;
4770 }
4771
4772 static struct file_operations kvm_device_fops = {
4773 .unlocked_ioctl = kvm_device_ioctl,
4774 .release = kvm_device_release,
4775 KVM_COMPAT(kvm_device_ioctl),
4776 .mmap = kvm_device_mmap,
4777 };
4778
4779 struct kvm_device *kvm_device_from_filp(struct file *filp)
4780 {
4781 if (filp->f_op != &kvm_device_fops)
4782 return NULL;
4783
4784 return filp->private_data;
4785 }
4786
4787 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4788 #ifdef CONFIG_KVM_MPIC
4789 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4790 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4791 #endif
4792 };
4793
4794 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4795 {
4796 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4797 return -ENOSPC;
4798
4799 if (kvm_device_ops_table[type] != NULL)
4800 return -EEXIST;
4801
4802 kvm_device_ops_table[type] = ops;
4803 return 0;
4804 }
4805
4806 void kvm_unregister_device_ops(u32 type)
4807 {
4808 if (kvm_device_ops_table[type] != NULL)
4809 kvm_device_ops_table[type] = NULL;
4810 }
4811
4812 static int kvm_ioctl_create_device(struct kvm *kvm,
4813 struct kvm_create_device *cd)
4814 {
4815 const struct kvm_device_ops *ops;
4816 struct kvm_device *dev;
4817 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4818 int type;
4819 int ret;
4820
4821 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4822 return -ENODEV;
4823
4824 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4825 ops = kvm_device_ops_table[type];
4826 if (ops == NULL)
4827 return -ENODEV;
4828
4829 if (test)
4830 return 0;
4831
4832 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4833 if (!dev)
4834 return -ENOMEM;
4835
4836 dev->ops = ops;
4837 dev->kvm = kvm;
4838
4839 mutex_lock(&kvm->lock);
4840 ret = ops->create(dev, type);
4841 if (ret < 0) {
4842 mutex_unlock(&kvm->lock);
4843 kfree(dev);
4844 return ret;
4845 }
4846 list_add_rcu(&dev->vm_node, &kvm->devices);
4847 mutex_unlock(&kvm->lock);
4848
4849 if (ops->init)
4850 ops->init(dev);
4851
4852 kvm_get_kvm(kvm);
4853 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4854 if (ret < 0) {
4855 kvm_put_kvm_no_destroy(kvm);
4856 mutex_lock(&kvm->lock);
4857 list_del_rcu(&dev->vm_node);
4858 synchronize_rcu();
4859 if (ops->release)
4860 ops->release(dev);
4861 mutex_unlock(&kvm->lock);
4862 if (ops->destroy)
4863 ops->destroy(dev);
4864 return ret;
4865 }
4866
4867 cd->fd = ret;
4868 return 0;
4869 }
4870
4871 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4872 {
4873 switch (arg) {
4874 case KVM_CAP_USER_MEMORY:
4875 case KVM_CAP_USER_MEMORY2:
4876 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4877 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4878 case KVM_CAP_INTERNAL_ERROR_DATA:
4879 #ifdef CONFIG_HAVE_KVM_MSI
4880 case KVM_CAP_SIGNAL_MSI:
4881 #endif
4882 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4883 case KVM_CAP_IRQFD:
4884 #endif
4885 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4886 case KVM_CAP_CHECK_EXTENSION_VM:
4887 case KVM_CAP_ENABLE_CAP_VM:
4888 case KVM_CAP_HALT_POLL:
4889 return 1;
4890 #ifdef CONFIG_KVM_MMIO
4891 case KVM_CAP_COALESCED_MMIO:
4892 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4893 case KVM_CAP_COALESCED_PIO:
4894 return 1;
4895 #endif
4896 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4897 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4898 return KVM_DIRTY_LOG_MANUAL_CAPS;
4899 #endif
4900 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4901 case KVM_CAP_IRQ_ROUTING:
4902 return KVM_MAX_IRQ_ROUTES;
4903 #endif
4904 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4905 case KVM_CAP_MULTI_ADDRESS_SPACE:
4906 if (kvm)
4907 return kvm_arch_nr_memslot_as_ids(kvm);
4908 return KVM_MAX_NR_ADDRESS_SPACES;
4909 #endif
4910 case KVM_CAP_NR_MEMSLOTS:
4911 return KVM_USER_MEM_SLOTS;
4912 case KVM_CAP_DIRTY_LOG_RING:
4913 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4914 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4915 #else
4916 return 0;
4917 #endif
4918 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4919 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4920 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4921 #else
4922 return 0;
4923 #endif
4924 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4925 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4926 #endif
4927 case KVM_CAP_BINARY_STATS_FD:
4928 case KVM_CAP_SYSTEM_EVENT_DATA:
4929 case KVM_CAP_DEVICE_CTRL:
4930 return 1;
4931 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4932 case KVM_CAP_MEMORY_ATTRIBUTES:
4933 return kvm_supported_mem_attributes(kvm);
4934 #endif
4935 #ifdef CONFIG_KVM_PRIVATE_MEM
4936 case KVM_CAP_GUEST_MEMFD:
4937 return !kvm || kvm_arch_has_private_mem(kvm);
4938 #endif
4939 default:
4940 break;
4941 }
4942 return kvm_vm_ioctl_check_extension(kvm, arg);
4943 }
4944
4945 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4946 {
4947 int r;
4948
4949 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4950 return -EINVAL;
4951
4952 /* the size should be a power of 2 */
4953 if (!size || (size & (size - 1)))
4954 return -EINVAL;
4955
4956 /* Must be large enough to hold the reserved entries and be at least one page */
4957 if (size < kvm_dirty_ring_get_rsvd_entries() *
4958 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4959 return -EINVAL;
4960
4961 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4962 sizeof(struct kvm_dirty_gfn))
4963 return -E2BIG;
4964
4965 /* We only allow the size to be set once */
4966 if (kvm->dirty_ring_size)
4967 return -EINVAL;
4968
4969 mutex_lock(&kvm->lock);
4970
4971 if (kvm->created_vcpus) {
4972 /* We don't allow changing this value after vCPUs are created */
4973 r = -EINVAL;
4974 } else {
4975 kvm->dirty_ring_size = size;
4976 r = 0;
4977 }
4978
4979 mutex_unlock(&kvm->lock);
4980 return r;
4981 }
4982
4983 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4984 {
4985 unsigned long i;
4986 struct kvm_vcpu *vcpu;
4987 int cleared = 0;
4988
4989 if (!kvm->dirty_ring_size)
4990 return -EINVAL;
4991
4992 mutex_lock(&kvm->slots_lock);
4993
4994 kvm_for_each_vcpu(i, vcpu, kvm)
4995 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4996
4997 mutex_unlock(&kvm->slots_lock);
4998
4999 if (cleared)
5000 kvm_flush_remote_tlbs(kvm);
5001
5002 return cleared;
5003 }
5004
5005 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
5006 struct kvm_enable_cap *cap)
5007 {
5008 return -EINVAL;
5009 }
5010
5011 bool kvm_are_all_memslots_empty(struct kvm *kvm)
5012 {
5013 int i;
5014
5015 lockdep_assert_held(&kvm->slots_lock);
5016
5017 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
5018 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
5019 return false;
5020 }
5021
5022 return true;
5023 }
5024 EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
5025
5026 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
5027 struct kvm_enable_cap *cap)
5028 {
5029 switch (cap->cap) {
5030 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5031 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
5032 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
5033
5034 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
5035 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
5036
5037 if (cap->flags || (cap->args[0] & ~allowed_options))
5038 return -EINVAL;
5039 kvm->manual_dirty_log_protect = cap->args[0];
5040 return 0;
5041 }
5042 #endif
5043 case KVM_CAP_HALT_POLL: {
5044 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5045 return -EINVAL;
5046
5047 kvm->max_halt_poll_ns = cap->args[0];
5048
5049 /*
5050 * Ensure kvm->override_halt_poll_ns does not become visible
5051 * before kvm->max_halt_poll_ns.
5052 *
5053 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5054 */
5055 smp_wmb();
5056 kvm->override_halt_poll_ns = true;
5057
5058 return 0;
5059 }
5060 case KVM_CAP_DIRTY_LOG_RING:
5061 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5062 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5063 return -EINVAL;
5064
5065 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5066 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5067 int r = -EINVAL;
5068
5069 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5070 !kvm->dirty_ring_size || cap->flags)
5071 return r;
5072
5073 mutex_lock(&kvm->slots_lock);
5074
5075 /*
5076 * For simplicity, allow enabling ring+bitmap if and only if
5077 * there are no memslots, e.g. to ensure all memslots allocate
5078 * a bitmap after the capability is enabled.
5079 */
5080 if (kvm_are_all_memslots_empty(kvm)) {
5081 kvm->dirty_ring_with_bitmap = true;
5082 r = 0;
5083 }
5084
5085 mutex_unlock(&kvm->slots_lock);
5086
5087 return r;
5088 }
5089 default:
5090 return kvm_vm_ioctl_enable_cap(kvm, cap);
5091 }
5092 }
5093
5094 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5095 size_t size, loff_t *offset)
5096 {
5097 struct kvm *kvm = file->private_data;
5098
5099 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5100 &kvm_vm_stats_desc[0], &kvm->stat,
5101 sizeof(kvm->stat), user_buffer, size, offset);
5102 }
5103
5104 static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5105 {
5106 struct kvm *kvm = file->private_data;
5107
5108 kvm_put_kvm(kvm);
5109 return 0;
5110 }
5111
5112 static const struct file_operations kvm_vm_stats_fops = {
5113 .owner = THIS_MODULE,
5114 .read = kvm_vm_stats_read,
5115 .release = kvm_vm_stats_release,
5116 .llseek = noop_llseek,
5117 };
5118
5119 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5120 {
5121 int fd;
5122 struct file *file;
5123
5124 fd = get_unused_fd_flags(O_CLOEXEC);
5125 if (fd < 0)
5126 return fd;
5127
5128 file = anon_inode_getfile("kvm-vm-stats",
5129 &kvm_vm_stats_fops, kvm, O_RDONLY);
5130 if (IS_ERR(file)) {
5131 put_unused_fd(fd);
5132 return PTR_ERR(file);
5133 }
5134
5135 kvm_get_kvm(kvm);
5136
5137 file->f_mode |= FMODE_PREAD;
5138 fd_install(fd, file);
5139
5140 return fd;
5141 }
5142
5143 #define SANITY_CHECK_MEM_REGION_FIELD(field) \
5144 do { \
5145 BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \
5146 offsetof(struct kvm_userspace_memory_region2, field)); \
5147 BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
5148 sizeof_field(struct kvm_userspace_memory_region2, field)); \
5149 } while (0)
5150
5151 static long kvm_vm_ioctl(struct file *filp,
5152 unsigned int ioctl, unsigned long arg)
5153 {
5154 struct kvm *kvm = filp->private_data;
5155 void __user *argp = (void __user *)arg;
5156 int r;
5157
5158 if (kvm->mm != current->mm || kvm->vm_dead)
5159 return -EIO;
5160 switch (ioctl) {
5161 case KVM_CREATE_VCPU:
5162 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5163 break;
5164 case KVM_ENABLE_CAP: {
5165 struct kvm_enable_cap cap;
5166
5167 r = -EFAULT;
5168 if (copy_from_user(&cap, argp, sizeof(cap)))
5169 goto out;
5170 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5171 break;
5172 }
5173 case KVM_SET_USER_MEMORY_REGION2:
5174 case KVM_SET_USER_MEMORY_REGION: {
5175 struct kvm_userspace_memory_region2 mem;
5176 unsigned long size;
5177
5178 if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5179 /*
5180 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5181 * accessed, but avoid leaking kernel memory in case of a bug.
5182 */
5183 memset(&mem, 0, sizeof(mem));
5184 size = sizeof(struct kvm_userspace_memory_region);
5185 } else {
5186 size = sizeof(struct kvm_userspace_memory_region2);
5187 }
5188
5189 /* Ensure the common parts of the two structs are identical. */
5190 SANITY_CHECK_MEM_REGION_FIELD(slot);
5191 SANITY_CHECK_MEM_REGION_FIELD(flags);
5192 SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5193 SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5194 SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5195
5196 r = -EFAULT;
5197 if (copy_from_user(&mem, argp, size))
5198 goto out;
5199
5200 r = -EINVAL;
5201 if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5202 (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5203 goto out;
5204
5205 r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5206 break;
5207 }
5208 case KVM_GET_DIRTY_LOG: {
5209 struct kvm_dirty_log log;
5210
5211 r = -EFAULT;
5212 if (copy_from_user(&log, argp, sizeof(log)))
5213 goto out;
5214 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5215 break;
5216 }
5217 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5218 case KVM_CLEAR_DIRTY_LOG: {
5219 struct kvm_clear_dirty_log log;
5220
5221 r = -EFAULT;
5222 if (copy_from_user(&log, argp, sizeof(log)))
5223 goto out;
5224 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5225 break;
5226 }
5227 #endif
5228 #ifdef CONFIG_KVM_MMIO
5229 case KVM_REGISTER_COALESCED_MMIO: {
5230 struct kvm_coalesced_mmio_zone zone;
5231
5232 r = -EFAULT;
5233 if (copy_from_user(&zone, argp, sizeof(zone)))
5234 goto out;
5235 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5236 break;
5237 }
5238 case KVM_UNREGISTER_COALESCED_MMIO: {
5239 struct kvm_coalesced_mmio_zone zone;
5240
5241 r = -EFAULT;
5242 if (copy_from_user(&zone, argp, sizeof(zone)))
5243 goto out;
5244 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5245 break;
5246 }
5247 #endif
5248 case KVM_IRQFD: {
5249 struct kvm_irqfd data;
5250
5251 r = -EFAULT;
5252 if (copy_from_user(&data, argp, sizeof(data)))
5253 goto out;
5254 r = kvm_irqfd(kvm, &data);
5255 break;
5256 }
5257 case KVM_IOEVENTFD: {
5258 struct kvm_ioeventfd data;
5259
5260 r = -EFAULT;
5261 if (copy_from_user(&data, argp, sizeof(data)))
5262 goto out;
5263 r = kvm_ioeventfd(kvm, &data);
5264 break;
5265 }
5266 #ifdef CONFIG_HAVE_KVM_MSI
5267 case KVM_SIGNAL_MSI: {
5268 struct kvm_msi msi;
5269
5270 r = -EFAULT;
5271 if (copy_from_user(&msi, argp, sizeof(msi)))
5272 goto out;
5273 r = kvm_send_userspace_msi(kvm, &msi);
5274 break;
5275 }
5276 #endif
5277 #ifdef __KVM_HAVE_IRQ_LINE
5278 case KVM_IRQ_LINE_STATUS:
5279 case KVM_IRQ_LINE: {
5280 struct kvm_irq_level irq_event;
5281
5282 r = -EFAULT;
5283 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5284 goto out;
5285
5286 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5287 ioctl == KVM_IRQ_LINE_STATUS);
5288 if (r)
5289 goto out;
5290
5291 r = -EFAULT;
5292 if (ioctl == KVM_IRQ_LINE_STATUS) {
5293 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5294 goto out;
5295 }
5296
5297 r = 0;
5298 break;
5299 }
5300 #endif
5301 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5302 case KVM_SET_GSI_ROUTING: {
5303 struct kvm_irq_routing routing;
5304 struct kvm_irq_routing __user *urouting;
5305 struct kvm_irq_routing_entry *entries = NULL;
5306
5307 r = -EFAULT;
5308 if (copy_from_user(&routing, argp, sizeof(routing)))
5309 goto out;
5310 r = -EINVAL;
5311 if (!kvm_arch_can_set_irq_routing(kvm))
5312 goto out;
5313 if (routing.nr > KVM_MAX_IRQ_ROUTES)
5314 goto out;
5315 if (routing.flags)
5316 goto out;
5317 if (routing.nr) {
5318 urouting = argp;
5319 entries = vmemdup_array_user(urouting->entries,
5320 routing.nr, sizeof(*entries));
5321 if (IS_ERR(entries)) {
5322 r = PTR_ERR(entries);
5323 goto out;
5324 }
5325 }
5326 r = kvm_set_irq_routing(kvm, entries, routing.nr,
5327 routing.flags);
5328 kvfree(entries);
5329 break;
5330 }
5331 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5332 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5333 case KVM_SET_MEMORY_ATTRIBUTES: {
5334 struct kvm_memory_attributes attrs;
5335
5336 r = -EFAULT;
5337 if (copy_from_user(&attrs, argp, sizeof(attrs)))
5338 goto out;
5339
5340 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5341 break;
5342 }
5343 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5344 case KVM_CREATE_DEVICE: {
5345 struct kvm_create_device cd;
5346
5347 r = -EFAULT;
5348 if (copy_from_user(&cd, argp, sizeof(cd)))
5349 goto out;
5350
5351 r = kvm_ioctl_create_device(kvm, &cd);
5352 if (r)
5353 goto out;
5354
5355 r = -EFAULT;
5356 if (copy_to_user(argp, &cd, sizeof(cd)))
5357 goto out;
5358
5359 r = 0;
5360 break;
5361 }
5362 case KVM_CHECK_EXTENSION:
5363 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5364 break;
5365 case KVM_RESET_DIRTY_RINGS:
5366 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5367 break;
5368 case KVM_GET_STATS_FD:
5369 r = kvm_vm_ioctl_get_stats_fd(kvm);
5370 break;
5371 #ifdef CONFIG_KVM_PRIVATE_MEM
5372 case KVM_CREATE_GUEST_MEMFD: {
5373 struct kvm_create_guest_memfd guest_memfd;
5374
5375 r = -EFAULT;
5376 if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5377 goto out;
5378
5379 r = kvm_gmem_create(kvm, &guest_memfd);
5380 break;
5381 }
5382 #endif
5383 default:
5384 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5385 }
5386 out:
5387 return r;
5388 }
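
/*
 * Illustrative userspace sketch (not part of KVM itself) of the memslot
 * path dispatched by KVM_SET_USER_MEMORY_REGION above.  Assumes "vm_fd"
 * came from KVM_CREATE_VM and that "backing"/"mem_size" describe a
 * page-aligned anonymous mapping owned by the caller; the names are
 * placeholders.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0x0,
 *		.memory_size     = mem_size,
 *		.userspace_addr  = (__u64)(unsigned long)backing,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */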
5389
5390 #ifdef CONFIG_KVM_COMPAT
5391 struct compat_kvm_dirty_log {
5392 __u32 slot;
5393 __u32 padding1;
5394 union {
5395 compat_uptr_t dirty_bitmap; /* one bit per page */
5396 __u64 padding2;
5397 };
5398 };
5399
5400 struct compat_kvm_clear_dirty_log {
5401 __u32 slot;
5402 __u32 num_pages;
5403 __u64 first_page;
5404 union {
5405 compat_uptr_t dirty_bitmap; /* one bit per page */
5406 __u64 padding2;
5407 };
5408 };
5409
5410 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5411 unsigned long arg)
5412 {
5413 return -ENOTTY;
5414 }
5415
5416 static long kvm_vm_compat_ioctl(struct file *filp,
5417 unsigned int ioctl, unsigned long arg)
5418 {
5419 struct kvm *kvm = filp->private_data;
5420 int r;
5421
5422 if (kvm->mm != current->mm || kvm->vm_dead)
5423 return -EIO;
5424
5425 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5426 if (r != -ENOTTY)
5427 return r;
5428
5429 switch (ioctl) {
5430 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5431 case KVM_CLEAR_DIRTY_LOG: {
5432 struct compat_kvm_clear_dirty_log compat_log;
5433 struct kvm_clear_dirty_log log;
5434
5435 if (copy_from_user(&compat_log, (void __user *)arg,
5436 sizeof(compat_log)))
5437 return -EFAULT;
5438 log.slot = compat_log.slot;
5439 log.num_pages = compat_log.num_pages;
5440 log.first_page = compat_log.first_page;
5441 log.padding2 = compat_log.padding2;
5442 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5443
5444 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5445 break;
5446 }
5447 #endif
5448 case KVM_GET_DIRTY_LOG: {
5449 struct compat_kvm_dirty_log compat_log;
5450 struct kvm_dirty_log log;
5451
5452 if (copy_from_user(&compat_log, (void __user *)arg,
5453 sizeof(compat_log)))
5454 return -EFAULT;
5455 log.slot = compat_log.slot;
5456 log.padding1 = compat_log.padding1;
5457 log.padding2 = compat_log.padding2;
5458 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5459
5460 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5461 break;
5462 }
5463 default:
5464 r = kvm_vm_ioctl(filp, ioctl, arg);
5465 }
5466 return r;
5467 }
5468 #endif
5469
5470 static struct file_operations kvm_vm_fops = {
5471 .release = kvm_vm_release,
5472 .unlocked_ioctl = kvm_vm_ioctl,
5473 .llseek = noop_llseek,
5474 KVM_COMPAT(kvm_vm_compat_ioctl),
5475 };
5476
5477 bool file_is_kvm(struct file *file)
5478 {
5479 return file && file->f_op == &kvm_vm_fops;
5480 }
5481 EXPORT_SYMBOL_GPL(file_is_kvm);
5482
5483 static int kvm_dev_ioctl_create_vm(unsigned long type)
5484 {
5485 char fdname[ITOA_MAX_LEN + 1];
5486 int r, fd;
5487 struct kvm *kvm;
5488 struct file *file;
5489
5490 fd = get_unused_fd_flags(O_CLOEXEC);
5491 if (fd < 0)
5492 return fd;
5493
5494 snprintf(fdname, sizeof(fdname), "%d", fd);
5495
5496 kvm = kvm_create_vm(type, fdname);
5497 if (IS_ERR(kvm)) {
5498 r = PTR_ERR(kvm);
5499 goto put_fd;
5500 }
5501
5502 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5503 if (IS_ERR(file)) {
5504 r = PTR_ERR(file);
5505 goto put_kvm;
5506 }
5507
5508 /*
5509 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5510 * already set, with ->release() being kvm_vm_release(). In error
5511 * cases it will be called by the final fput(file) and will take
5512 * care of doing kvm_put_kvm(kvm).
5513 */
5514 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5515
5516 fd_install(fd, file);
5517 return fd;
5518
5519 put_kvm:
5520 kvm_put_kvm(kvm);
5521 put_fd:
5522 put_unused_fd(fd);
5523 return r;
5524 }
5525
5526 static long kvm_dev_ioctl(struct file *filp,
5527 unsigned int ioctl, unsigned long arg)
5528 {
5529 int r = -EINVAL;
5530
5531 switch (ioctl) {
5532 case KVM_GET_API_VERSION:
5533 if (arg)
5534 goto out;
5535 r = KVM_API_VERSION;
5536 break;
5537 case KVM_CREATE_VM:
5538 r = kvm_dev_ioctl_create_vm(arg);
5539 break;
5540 case KVM_CHECK_EXTENSION:
5541 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5542 break;
5543 case KVM_GET_VCPU_MMAP_SIZE:
5544 if (arg)
5545 goto out;
5546 r = PAGE_SIZE; /* struct kvm_run */
5547 #ifdef CONFIG_X86
5548 r += PAGE_SIZE; /* pio data page */
5549 #endif
5550 #ifdef CONFIG_KVM_MMIO
5551 r += PAGE_SIZE; /* coalesced mmio ring page */
5552 #endif
5553 break;
5554 default:
5555 return kvm_arch_dev_ioctl(filp, ioctl, arg);
5556 }
5557 out:
5558 return r;
5559 }
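
/*
 * Illustrative userspace sketch (not part of KVM itself) of the /dev/kvm
 * ioctls dispatched above; error handling is trimmed and the descriptor
 * names are placeholders.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		errx(1, "unsupported KVM API version");
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 */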
5560
5561 static struct file_operations kvm_chardev_ops = {
5562 .unlocked_ioctl = kvm_dev_ioctl,
5563 .llseek = noop_llseek,
5564 KVM_COMPAT(kvm_dev_ioctl),
5565 };
5566
5567 static struct miscdevice kvm_dev = {
5568 KVM_MINOR,
5569 "kvm",
5570 &kvm_chardev_ops,
5571 };
5572
5573 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5574 __visible bool kvm_rebooting;
5575 EXPORT_SYMBOL_GPL(kvm_rebooting);
5576
5577 static DEFINE_PER_CPU(bool, hardware_enabled);
5578 static int kvm_usage_count;
5579
5580 static int __hardware_enable_nolock(void)
5581 {
5582 if (__this_cpu_read(hardware_enabled))
5583 return 0;
5584
5585 if (kvm_arch_hardware_enable()) {
5586 pr_info("kvm: enabling virtualization on CPU%d failed\n",
5587 raw_smp_processor_id());
5588 return -EIO;
5589 }
5590
5591 __this_cpu_write(hardware_enabled, true);
5592 return 0;
5593 }
5594
5595 static void hardware_enable_nolock(void *failed)
5596 {
5597 if (__hardware_enable_nolock())
5598 atomic_inc(failed);
5599 }
5600
5601 static int kvm_online_cpu(unsigned int cpu)
5602 {
5603 int ret = 0;
5604
5605 /*
5606 * Abort the CPU online process if hardware virtualization cannot
5607 * be enabled. Otherwise running VMs would encounter unrecoverable
5608 * errors when scheduled to this CPU.
5609 */
5610 mutex_lock(&kvm_lock);
5611 if (kvm_usage_count)
5612 ret = __hardware_enable_nolock();
5613 mutex_unlock(&kvm_lock);
5614 return ret;
5615 }
5616
5617 static void hardware_disable_nolock(void *junk)
5618 {
5619 /*
5620 * Note, hardware_disable_all_nolock() tells all online CPUs to disable
5621 * hardware, not just CPUs that successfully enabled hardware!
5622 */
5623 if (!__this_cpu_read(hardware_enabled))
5624 return;
5625
5626 kvm_arch_hardware_disable();
5627
5628 __this_cpu_write(hardware_enabled, false);
5629 }
5630
5631 static int kvm_offline_cpu(unsigned int cpu)
5632 {
5633 mutex_lock(&kvm_lock);
5634 if (kvm_usage_count)
5635 hardware_disable_nolock(NULL);
5636 mutex_unlock(&kvm_lock);
5637 return 0;
5638 }
5639
5640 static void hardware_disable_all_nolock(void)
5641 {
5642 BUG_ON(!kvm_usage_count);
5643
5644 kvm_usage_count--;
5645 if (!kvm_usage_count)
5646 on_each_cpu(hardware_disable_nolock, NULL, 1);
5647 }
5648
5649 static void hardware_disable_all(void)
5650 {
5651 cpus_read_lock();
5652 mutex_lock(&kvm_lock);
5653 hardware_disable_all_nolock();
5654 mutex_unlock(&kvm_lock);
5655 cpus_read_unlock();
5656 }
5657
5658 static int hardware_enable_all(void)
5659 {
5660 atomic_t failed = ATOMIC_INIT(0);
5661 int r;
5662
5663 /*
5664 * Do not enable hardware virtualization if the system is going down.
5665 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5666 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
5667 * after kvm_reboot() is called. Note, this relies on system_state
5668 * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
5669 * hook instead of registering a dedicated reboot notifier (the latter
5670 * runs before system_state is updated).
5671 */
5672 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5673 system_state == SYSTEM_RESTART)
5674 return -EBUSY;
5675
5676 /*
5677 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
5678 * is called, and so on_each_cpu() between them includes the CPU that
5679 * is being onlined. As a result, hardware_enable_nolock() may get
5680 * invoked before kvm_online_cpu(), which also enables hardware if the
5681 * usage count is non-zero. Disable CPU hotplug to avoid attempting to
5682 * enable hardware multiple times.
5683 */
5684 cpus_read_lock();
5685 mutex_lock(&kvm_lock);
5686
5687 r = 0;
5688
5689 kvm_usage_count++;
5690 if (kvm_usage_count == 1) {
5691 on_each_cpu(hardware_enable_nolock, &failed, 1);
5692
5693 if (atomic_read(&failed)) {
5694 hardware_disable_all_nolock();
5695 r = -EBUSY;
5696 }
5697 }
5698
5699 mutex_unlock(&kvm_lock);
5700 cpus_read_unlock();
5701
5702 return r;
5703 }
5704
5705 static void kvm_shutdown(void)
5706 {
5707 /*
5708 * Disable hardware virtualization and set kvm_rebooting to indicate
5709 * that KVM has asynchronously disabled hardware virtualization, i.e.
5710 * that relevant errors and exceptions aren't entirely unexpected.
5711 * Some flavors of hardware virtualization need to be disabled before
5712 * transferring control to firmware (to perform shutdown/reboot), e.g.
5713 * on x86, virtualization can block INIT interrupts, which are used by
5714 * firmware to pull APs back under firmware control. Note, this path
5715 * is used for both shutdown and reboot scenarios, i.e. neither name is
5716 * 100% comprehensive.
5717 */
5718 pr_info("kvm: exiting hardware virtualization\n");
5719 kvm_rebooting = true;
5720 on_each_cpu(hardware_disable_nolock, NULL, 1);
5721 }
5722
5723 static int kvm_suspend(void)
5724 {
5725 /*
5726 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5727 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
5728 * is stable. Assert that kvm_lock is not held to ensure the system
5729 * isn't suspended while KVM is enabling hardware. Hardware enabling
5730 * can be preempted, but the task cannot be frozen until it has dropped
5731 * all locks (userspace tasks are frozen via a fake signal).
5732 */
5733 lockdep_assert_not_held(&kvm_lock);
5734 lockdep_assert_irqs_disabled();
5735
5736 if (kvm_usage_count)
5737 hardware_disable_nolock(NULL);
5738 return 0;
5739 }
5740
5741 static void kvm_resume(void)
5742 {
5743 lockdep_assert_not_held(&kvm_lock);
5744 lockdep_assert_irqs_disabled();
5745
5746 if (kvm_usage_count)
5747 WARN_ON_ONCE(__hardware_enable_nolock());
5748 }
5749
5750 static struct syscore_ops kvm_syscore_ops = {
5751 .suspend = kvm_suspend,
5752 .resume = kvm_resume,
5753 .shutdown = kvm_shutdown,
5754 };
5755 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5756 static int hardware_enable_all(void)
5757 {
5758 return 0;
5759 }
5760
5761 static void hardware_disable_all(void)
5762 {
5763
5764 }
5765 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5766
5767 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5768 {
5769 if (dev->ops->destructor)
5770 dev->ops->destructor(dev);
5771 }
5772
5773 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5774 {
5775 int i;
5776
5777 for (i = 0; i < bus->dev_count; i++) {
5778 struct kvm_io_device *pos = bus->range[i].dev;
5779
5780 kvm_iodevice_destructor(pos);
5781 }
5782 kfree(bus);
5783 }
5784
5785 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5786 const struct kvm_io_range *r2)
5787 {
5788 gpa_t addr1 = r1->addr;
5789 gpa_t addr2 = r2->addr;
5790
5791 if (addr1 < addr2)
5792 return -1;
5793
5794 /* If r2->len == 0, match the exact address. If r2->len != 0,
5795 * accept any overlapping write. Any order is acceptable for
5796 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5797 * we process all of them.
5798 */
5799 if (r2->len) {
5800 addr1 += r1->len;
5801 addr2 += r2->len;
5802 }
5803
5804 if (addr1 > addr2)
5805 return 1;
5806
5807 return 0;
5808 }
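
/*
 * Worked example (a sketch, not from the original comment): an access
 * keyed as { .addr = 0x104, .len = 4 } compared against a registered
 * range { .addr = 0x100, .len = 0x10 } is not "less" (0x104 >= 0x100);
 * both lengths are then added, giving end addresses 0x108 vs. 0x110,
 * which is not "greater" either, so the two compare equal and the device
 * matches.  A registered zero-length range skips the length addition and
 * therefore only compares equal on an exact address match.
 */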
5809
5810 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5811 {
5812 return kvm_io_bus_cmp(p1, p2);
5813 }
5814
5815 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5816 gpa_t addr, int len)
5817 {
5818 struct kvm_io_range *range, key;
5819 int off;
5820
5821 key = (struct kvm_io_range) {
5822 .addr = addr,
5823 .len = len,
5824 };
5825
5826 range = bsearch(&key, bus->range, bus->dev_count,
5827 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5828 if (range == NULL)
5829 return -ENOENT;
5830
5831 off = range - bus->range;
5832
5833 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5834 off--;
5835
5836 return off;
5837 }
5838
5839 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5840 struct kvm_io_range *range, const void *val)
5841 {
5842 int idx;
5843
5844 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5845 if (idx < 0)
5846 return -EOPNOTSUPP;
5847
5848 while (idx < bus->dev_count &&
5849 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5850 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5851 range->len, val))
5852 return idx;
5853 idx++;
5854 }
5855
5856 return -EOPNOTSUPP;
5857 }
5858
5859 /* kvm_io_bus_write - called under kvm->slots_lock */
5860 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5861 int len, const void *val)
5862 {
5863 struct kvm_io_bus *bus;
5864 struct kvm_io_range range;
5865 int r;
5866
5867 range = (struct kvm_io_range) {
5868 .addr = addr,
5869 .len = len,
5870 };
5871
5872 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5873 if (!bus)
5874 return -ENOMEM;
5875 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5876 return r < 0 ? r : 0;
5877 }
5878 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5879
5880 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5881 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5882 gpa_t addr, int len, const void *val, long cookie)
5883 {
5884 struct kvm_io_bus *bus;
5885 struct kvm_io_range range;
5886
5887 range = (struct kvm_io_range) {
5888 .addr = addr,
5889 .len = len,
5890 };
5891
5892 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5893 if (!bus)
5894 return -ENOMEM;
5895
5896 /* First try the device referenced by cookie. */
5897 if ((cookie >= 0) && (cookie < bus->dev_count) &&
5898 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5899 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5900 val))
5901 return cookie;
5902
5903 /*
5904 * cookie contained garbage; fall back to search and return the
5905 * correct cookie value.
5906 */
5907 return __kvm_io_bus_write(vcpu, bus, &range, val);
5908 }
5909
5910 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5911 struct kvm_io_range *range, void *val)
5912 {
5913 int idx;
5914
5915 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5916 if (idx < 0)
5917 return -EOPNOTSUPP;
5918
5919 while (idx < bus->dev_count &&
5920 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5921 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5922 range->len, val))
5923 return idx;
5924 idx++;
5925 }
5926
5927 return -EOPNOTSUPP;
5928 }
5929
5930 /* kvm_io_bus_read - called under kvm->slots_lock */
5931 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5932 int len, void *val)
5933 {
5934 struct kvm_io_bus *bus;
5935 struct kvm_io_range range;
5936 int r;
5937
5938 range = (struct kvm_io_range) {
5939 .addr = addr,
5940 .len = len,
5941 };
5942
5943 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5944 if (!bus)
5945 return -ENOMEM;
5946 r = __kvm_io_bus_read(vcpu, bus, &range, val);
5947 return r < 0 ? r : 0;
5948 }
5949
5950 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5951 int len, struct kvm_io_device *dev)
5952 {
5953 int i;
5954 struct kvm_io_bus *new_bus, *bus;
5955 struct kvm_io_range range;
5956
5957 lockdep_assert_held(&kvm->slots_lock);
5958
5959 bus = kvm_get_bus(kvm, bus_idx);
5960 if (!bus)
5961 return -ENOMEM;
5962
5963 /* exclude ioeventfd which is limited by maximum fd */
5964 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5965 return -ENOSPC;
5966
5967 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5968 GFP_KERNEL_ACCOUNT);
5969 if (!new_bus)
5970 return -ENOMEM;
5971
5972 range = (struct kvm_io_range) {
5973 .addr = addr,
5974 .len = len,
5975 .dev = dev,
5976 };
5977
5978 for (i = 0; i < bus->dev_count; i++)
5979 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5980 break;
5981
5982 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5983 new_bus->dev_count++;
5984 new_bus->range[i] = range;
5985 memcpy(new_bus->range + i + 1, bus->range + i,
5986 (bus->dev_count - i) * sizeof(struct kvm_io_range));
5987 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5988 synchronize_srcu_expedited(&kvm->srcu);
5989 kfree(bus);
5990
5991 return 0;
5992 }
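
/*
 * Hedged in-kernel sketch of how a device is attached to an I/O bus; the
 * ops table and the device type are hypothetical, but the pattern mirrors
 * the existing coalesced MMIO and ioeventfd users: embed a struct
 * kvm_io_device, initialize it with the ops, and register it under
 * kvm->slots_lock.
 *
 *	static const struct kvm_io_device_ops example_ops = {
 *		.read  = example_read,
 *		.write = example_write,
 *	};
 *
 *	mutex_lock(&kvm->slots_lock);
 *	kvm_iodevice_init(&dev->io_dev, &example_ops);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, gpa, len,
 *				      &dev->io_dev);
 *	mutex_unlock(&kvm->slots_lock);
 */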
5993
5994 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5995 struct kvm_io_device *dev)
5996 {
5997 int i;
5998 struct kvm_io_bus *new_bus, *bus;
5999
6000 lockdep_assert_held(&kvm->slots_lock);
6001
6002 bus = kvm_get_bus(kvm, bus_idx);
6003 if (!bus)
6004 return 0;
6005
6006 for (i = 0; i < bus->dev_count; i++) {
6007 if (bus->range[i].dev == dev) {
6008 break;
6009 }
6010 }
6011
6012 if (i == bus->dev_count)
6013 return 0;
6014
6015 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
6016 GFP_KERNEL_ACCOUNT);
6017 if (new_bus) {
6018 memcpy(new_bus, bus, struct_size(bus, range, i));
6019 new_bus->dev_count--;
6020 memcpy(new_bus->range + i, bus->range + i + 1,
6021 flex_array_size(new_bus, range, new_bus->dev_count - i));
6022 }
6023
6024 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6025 synchronize_srcu_expedited(&kvm->srcu);
6026
6027 /*
6028 * If NULL bus is installed, destroy the old bus, including all the
6029 * attached devices. Otherwise, destroy the caller's device only.
6030 */
6031 if (!new_bus) {
6032 pr_err("kvm: failed to shrink bus, removing it completely\n");
6033 kvm_io_bus_destroy(bus);
6034 return -ENOMEM;
6035 }
6036
6037 kvm_iodevice_destructor(dev);
6038 kfree(bus);
6039 return 0;
6040 }
6041
6042 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6043 gpa_t addr)
6044 {
6045 struct kvm_io_bus *bus;
6046 int dev_idx, srcu_idx;
6047 struct kvm_io_device *iodev = NULL;
6048
6049 srcu_idx = srcu_read_lock(&kvm->srcu);
6050
6051 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
6052 if (!bus)
6053 goto out_unlock;
6054
6055 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6056 if (dev_idx < 0)
6057 goto out_unlock;
6058
6059 iodev = bus->range[dev_idx].dev;
6060
6061 out_unlock:
6062 srcu_read_unlock(&kvm->srcu, srcu_idx);
6063
6064 return iodev;
6065 }
6066 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
6067
6068 static int kvm_debugfs_open(struct inode *inode, struct file *file,
6069 int (*get)(void *, u64 *), int (*set)(void *, u64),
6070 const char *fmt)
6071 {
6072 int ret;
6073 struct kvm_stat_data *stat_data = inode->i_private;
6074
6075 /*
6076 * The debugfs files are a reference to the kvm struct which
6077 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
6078 * avoids the race between open and the removal of the debugfs directory.
6079 */
6080 if (!kvm_get_kvm_safe(stat_data->kvm))
6081 return -ENOENT;
6082
6083 ret = simple_attr_open(inode, file, get,
6084 kvm_stats_debugfs_mode(stat_data->desc) & 0222
6085 ? set : NULL, fmt);
6086 if (ret)
6087 kvm_put_kvm(stat_data->kvm);
6088
6089 return ret;
6090 }
6091
6092 static int kvm_debugfs_release(struct inode *inode, struct file *file)
6093 {
6094 struct kvm_stat_data *stat_data = inode->i_private;
6095
6096 simple_attr_release(inode, file);
6097 kvm_put_kvm(stat_data->kvm);
6098
6099 return 0;
6100 }
6101
6102 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6103 {
6104 *val = *(u64 *)((void *)(&kvm->stat) + offset);
6105
6106 return 0;
6107 }
6108
6109 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6110 {
6111 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
6112
6113 return 0;
6114 }
6115
6116 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6117 {
6118 unsigned long i;
6119 struct kvm_vcpu *vcpu;
6120
6121 *val = 0;
6122
6123 kvm_for_each_vcpu(i, vcpu, kvm)
6124 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
6125
6126 return 0;
6127 }
6128
6129 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6130 {
6131 unsigned long i;
6132 struct kvm_vcpu *vcpu;
6133
6134 kvm_for_each_vcpu(i, vcpu, kvm)
6135 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6136
6137 return 0;
6138 }
6139
6140 static int kvm_stat_data_get(void *data, u64 *val)
6141 {
6142 int r = -EFAULT;
6143 struct kvm_stat_data *stat_data = data;
6144
6145 switch (stat_data->kind) {
6146 case KVM_STAT_VM:
6147 r = kvm_get_stat_per_vm(stat_data->kvm,
6148 stat_data->desc->desc.offset, val);
6149 break;
6150 case KVM_STAT_VCPU:
6151 r = kvm_get_stat_per_vcpu(stat_data->kvm,
6152 stat_data->desc->desc.offset, val);
6153 break;
6154 }
6155
6156 return r;
6157 }
6158
6159 static int kvm_stat_data_clear(void *data, u64 val)
6160 {
6161 int r = -EFAULT;
6162 struct kvm_stat_data *stat_data = data;
6163
6164 if (val)
6165 return -EINVAL;
6166
6167 switch (stat_data->kind) {
6168 case KVM_STAT_VM:
6169 r = kvm_clear_stat_per_vm(stat_data->kvm,
6170 stat_data->desc->desc.offset);
6171 break;
6172 case KVM_STAT_VCPU:
6173 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6174 stat_data->desc->desc.offset);
6175 break;
6176 }
6177
6178 return r;
6179 }
6180
6181 static int kvm_stat_data_open(struct inode *inode, struct file *file)
6182 {
6183 __simple_attr_check_format("%llu\n", 0ull);
6184 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6185 kvm_stat_data_clear, "%llu\n");
6186 }
6187
6188 static const struct file_operations stat_fops_per_vm = {
6189 .owner = THIS_MODULE,
6190 .open = kvm_stat_data_open,
6191 .release = kvm_debugfs_release,
6192 .read = simple_attr_read,
6193 .write = simple_attr_write,
6194 .llseek = no_llseek,
6195 };
6196
6197 static int vm_stat_get(void *_offset, u64 *val)
6198 {
6199 unsigned offset = (long)_offset;
6200 struct kvm *kvm;
6201 u64 tmp_val;
6202
6203 *val = 0;
6204 mutex_lock(&kvm_lock);
6205 list_for_each_entry(kvm, &vm_list, vm_list) {
6206 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6207 *val += tmp_val;
6208 }
6209 mutex_unlock(&kvm_lock);
6210 return 0;
6211 }
6212
6213 static int vm_stat_clear(void *_offset, u64 val)
6214 {
6215 unsigned offset = (long)_offset;
6216 struct kvm *kvm;
6217
6218 if (val)
6219 return -EINVAL;
6220
6221 mutex_lock(&kvm_lock);
6222 list_for_each_entry(kvm, &vm_list, vm_list) {
6223 kvm_clear_stat_per_vm(kvm, offset);
6224 }
6225 mutex_unlock(&kvm_lock);
6226
6227 return 0;
6228 }
6229
6230 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6231 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6232
6233 static int vcpu_stat_get(void *_offset, u64 *val)
6234 {
6235 unsigned offset = (long)_offset;
6236 struct kvm *kvm;
6237 u64 tmp_val;
6238
6239 *val = 0;
6240 mutex_lock(&kvm_lock);
6241 list_for_each_entry(kvm, &vm_list, vm_list) {
6242 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6243 *val += tmp_val;
6244 }
6245 mutex_unlock(&kvm_lock);
6246 return 0;
6247 }
6248
6249 static int vcpu_stat_clear(void *_offset, u64 val)
6250 {
6251 unsigned offset = (long)_offset;
6252 struct kvm *kvm;
6253
6254 if (val)
6255 return -EINVAL;
6256
6257 mutex_lock(&kvm_lock);
6258 list_for_each_entry(kvm, &vm_list, vm_list) {
6259 kvm_clear_stat_per_vcpu(kvm, offset);
6260 }
6261 mutex_unlock(&kvm_lock);
6262
6263 return 0;
6264 }
6265
6266 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6267 "%llu\n");
6268 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6269
6270 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6271 {
6272 struct kobj_uevent_env *env;
6273 unsigned long long created, active;
6274
6275 if (!kvm_dev.this_device || !kvm)
6276 return;
6277
6278 mutex_lock(&kvm_lock);
6279 if (type == KVM_EVENT_CREATE_VM) {
6280 kvm_createvm_count++;
6281 kvm_active_vms++;
6282 } else if (type == KVM_EVENT_DESTROY_VM) {
6283 kvm_active_vms--;
6284 }
6285 created = kvm_createvm_count;
6286 active = kvm_active_vms;
6287 mutex_unlock(&kvm_lock);
6288
6289 env = kzalloc(sizeof(*env), GFP_KERNEL);
6290 if (!env)
6291 return;
6292
6293 add_uevent_var(env, "CREATED=%llu", created);
6294 add_uevent_var(env, "COUNT=%llu", active);
6295
6296 if (type == KVM_EVENT_CREATE_VM) {
6297 add_uevent_var(env, "EVENT=create");
6298 kvm->userspace_pid = task_pid_nr(current);
6299 } else if (type == KVM_EVENT_DESTROY_VM) {
6300 add_uevent_var(env, "EVENT=destroy");
6301 }
6302 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6303
6304 if (!IS_ERR(kvm->debugfs_dentry)) {
6305 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
6306
6307 if (p) {
6308 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6309 if (!IS_ERR(tmp))
6310 add_uevent_var(env, "STATS_PATH=%s", tmp);
6311 kfree(p);
6312 }
6313 }
6314 /* no need for checks, since we are adding at most only 5 keys */
6315 env->envp[env->envp_idx++] = NULL;
6316 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6317 kfree(env);
6318 }
6319
6320 static void kvm_init_debug(void)
6321 {
6322 const struct file_operations *fops;
6323 const struct _kvm_stats_desc *pdesc;
6324 int i;
6325
6326 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6327
6328 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6329 pdesc = &kvm_vm_stats_desc[i];
6330 if (kvm_stats_debugfs_mode(pdesc) & 0222)
6331 fops = &vm_stat_fops;
6332 else
6333 fops = &vm_stat_readonly_fops;
6334 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6335 kvm_debugfs_dir,
6336 (void *)(long)pdesc->desc.offset, fops);
6337 }
6338
6339 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6340 pdesc = &kvm_vcpu_stats_desc[i];
6341 if (kvm_stats_debugfs_mode(pdesc) & 0222)
6342 fops = &vcpu_stat_fops;
6343 else
6344 fops = &vcpu_stat_readonly_fops;
6345 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6346 kvm_debugfs_dir,
6347 (void *)(long)pdesc->desc.offset, fops);
6348 }
6349 }
6350
6351 static inline
6352 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6353 {
6354 return container_of(pn, struct kvm_vcpu, preempt_notifier);
6355 }
6356
6357 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6358 {
6359 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6360
6361 WRITE_ONCE(vcpu->preempted, false);
6362 WRITE_ONCE(vcpu->ready, false);
6363
6364 __this_cpu_write(kvm_running_vcpu, vcpu);
6365 kvm_arch_vcpu_load(vcpu, cpu);
6366
6367 WRITE_ONCE(vcpu->scheduled_out, false);
6368 }
6369
6370 static void kvm_sched_out(struct preempt_notifier *pn,
6371 struct task_struct *next)
6372 {
6373 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6374
6375 WRITE_ONCE(vcpu->scheduled_out, true);
6376
6377 if (current->on_rq && vcpu->wants_to_run) {
6378 WRITE_ONCE(vcpu->preempted, true);
6379 WRITE_ONCE(vcpu->ready, true);
6380 }
6381 kvm_arch_vcpu_put(vcpu);
6382 __this_cpu_write(kvm_running_vcpu, NULL);
6383 }
6384
6385 /**
6386 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6387 *
6388 * We can disable preemption locally around accessing the per-CPU variable,
6389 * and use the resolved vcpu pointer after enabling preemption again,
6390 * because even if the current thread is migrated to another CPU, reading
6391 * the per-CPU value later will give us the same value as we update the
6392 * per-CPU variable in the preempt notifier handlers.
6393 */
6394 struct kvm_vcpu *kvm_get_running_vcpu(void)
6395 {
6396 struct kvm_vcpu *vcpu;
6397
6398 preempt_disable();
6399 vcpu = __this_cpu_read(kvm_running_vcpu);
6400 preempt_enable();
6401
6402 return vcpu;
6403 }
6404 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
6405
6406 /**
6407 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6408 */
6409 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6410 {
6411 return &kvm_running_vcpu;
6412 }
6413
6414 #ifdef CONFIG_GUEST_PERF_EVENTS
6415 static unsigned int kvm_guest_state(void)
6416 {
6417 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6418 unsigned int state;
6419
6420 if (!kvm_arch_pmi_in_guest(vcpu))
6421 return 0;
6422
6423 state = PERF_GUEST_ACTIVE;
6424 if (!kvm_arch_vcpu_in_kernel(vcpu))
6425 state |= PERF_GUEST_USER;
6426
6427 return state;
6428 }
6429
6430 static unsigned long kvm_guest_get_ip(void)
6431 {
6432 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6433
6434 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6435 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6436 return 0;
6437
6438 return kvm_arch_vcpu_get_ip(vcpu);
6439 }
6440
6441 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6442 .state = kvm_guest_state,
6443 .get_ip = kvm_guest_get_ip,
6444 .handle_intel_pt_intr = NULL,
6445 };
6446
6447 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
6448 {
6449 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6450 perf_register_guest_info_callbacks(&kvm_guest_cbs);
6451 }
6452 void kvm_unregister_perf_callbacks(void)
6453 {
6454 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6455 }
6456 #endif
6457
6458 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6459 {
6460 int r;
6461 int cpu;
6462
6463 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6464 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
6465 kvm_online_cpu, kvm_offline_cpu);
6466 if (r)
6467 return r;
6468
6469 register_syscore_ops(&kvm_syscore_ops);
6470 #endif
6471
6472 /* A kmem cache lets us meet the alignment requirements of fx_save. */
6473 if (!vcpu_align)
6474 vcpu_align = __alignof__(struct kvm_vcpu);
6475 kvm_vcpu_cache =
6476 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6477 SLAB_ACCOUNT,
6478 offsetof(struct kvm_vcpu, arch),
6479 offsetofend(struct kvm_vcpu, stats_id)
6480 - offsetof(struct kvm_vcpu, arch),
6481 NULL);
6482 if (!kvm_vcpu_cache) {
6483 r = -ENOMEM;
6484 goto err_vcpu_cache;
6485 }
6486
6487 for_each_possible_cpu(cpu) {
6488 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6489 GFP_KERNEL, cpu_to_node(cpu))) {
6490 r = -ENOMEM;
6491 goto err_cpu_kick_mask;
6492 }
6493 }
6494
6495 r = kvm_irqfd_init();
6496 if (r)
6497 goto err_irqfd;
6498
6499 r = kvm_async_pf_init();
6500 if (r)
6501 goto err_async_pf;
6502
6503 kvm_chardev_ops.owner = module;
6504 kvm_vm_fops.owner = module;
6505 kvm_vcpu_fops.owner = module;
6506 kvm_device_fops.owner = module;
6507
6508 kvm_preempt_ops.sched_in = kvm_sched_in;
6509 kvm_preempt_ops.sched_out = kvm_sched_out;
6510
6511 kvm_init_debug();
6512
6513 r = kvm_vfio_ops_init();
6514 if (WARN_ON_ONCE(r))
6515 goto err_vfio;
6516
6517 kvm_gmem_init(module);
6518
6519 /*
6520 * Registration _must_ be the very last thing done, as this exposes
6521 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
6522 */
6523 r = misc_register(&kvm_dev);
6524 if (r) {
6525 pr_err("kvm: misc device register failed\n");
6526 goto err_register;
6527 }
6528
6529 return 0;
6530
6531 err_register:
6532 kvm_vfio_ops_exit();
6533 err_vfio:
6534 kvm_async_pf_deinit();
6535 err_async_pf:
6536 kvm_irqfd_exit();
6537 err_irqfd:
6538 err_cpu_kick_mask:
6539 for_each_possible_cpu(cpu)
6540 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6541 kmem_cache_destroy(kvm_vcpu_cache);
6542 err_vcpu_cache:
6543 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6544 unregister_syscore_ops(&kvm_syscore_ops);
6545 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6546 #endif
6547 return r;
6548 }
6549 EXPORT_SYMBOL_GPL(kvm_init);
6550
6551 void kvm_exit(void)
6552 {
6553 int cpu;
6554
6555 /*
6556 	 * Note, unregistering /dev/kvm doesn't strictly need to come first, as
6557 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
6558 * to KVM while the module is being stopped.
6559 */
6560 misc_deregister(&kvm_dev);
6561
6562 debugfs_remove_recursive(kvm_debugfs_dir);
6563 for_each_possible_cpu(cpu)
6564 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6565 kmem_cache_destroy(kvm_vcpu_cache);
6566 kvm_vfio_ops_exit();
6567 kvm_async_pf_deinit();
6568 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6569 unregister_syscore_ops(&kvm_syscore_ops);
6570 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6571 #endif
6572 kvm_irqfd_exit();
6573 }
6574 EXPORT_SYMBOL_GPL(kvm_exit);
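
/*
 * Hedged sketch of how an architecture module might wire up the generic
 * init/exit paths above; the vcpu structure name is a placeholder and all
 * arch-specific setup and teardown is elided.
 *
 *	static int __init example_arch_kvm_init(void)
 *	{
 *		// arch-specific hardware checks happen before kvm_init()
 *		return kvm_init(sizeof(struct example_vcpu),
 *				__alignof__(struct example_vcpu), THIS_MODULE);
 *	}
 *
 *	static void __exit example_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *		// arch-specific teardown happens after kvm_exit()
 *	}
 */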
6575
6576 struct kvm_vm_worker_thread_context {
6577 struct kvm *kvm;
6578 struct task_struct *parent;
6579 struct completion init_done;
6580 kvm_vm_thread_fn_t thread_fn;
6581 uintptr_t data;
6582 int err;
6583 };
6584
6585 static int kvm_vm_worker_thread(void *context)
6586 {
6587 /*
6588 * The init_context is allocated on the stack of the parent thread, so
6589 * we have to locally copy anything that is needed beyond initialization
6590 */
6591 struct kvm_vm_worker_thread_context *init_context = context;
6592 struct task_struct *parent;
6593 struct kvm *kvm = init_context->kvm;
6594 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
6595 uintptr_t data = init_context->data;
6596 int err;
6597
6598 err = kthread_park(current);
6599 /* kthread_park(current) is never supposed to return an error */
6600 WARN_ON(err != 0);
6601 if (err)
6602 goto init_complete;
6603
6604 err = cgroup_attach_task_all(init_context->parent, current);
6605 if (err) {
6606 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
6607 __func__, err);
6608 goto init_complete;
6609 }
6610
6611 set_user_nice(current, task_nice(init_context->parent));
6612
6613 init_complete:
6614 init_context->err = err;
6615 complete(&init_context->init_done);
6616 init_context = NULL;
6617
6618 if (err)
6619 goto out;
6620
6621 /* Wait to be woken up by the spawner before proceeding. */
6622 kthread_parkme();
6623
6624 if (!kthread_should_stop())
6625 err = thread_fn(kvm, data);
6626
6627 out:
6628 /*
6629 * Move kthread back to its original cgroup to prevent it lingering in
6630 * the cgroup of the VM process, after the latter finishes its
6631 * execution.
6632 *
6633 * kthread_stop() waits on the 'exited' completion condition which is
6634 * set in exit_mm(), via mm_release(), in do_exit(). However, the
6635 * kthread is removed from the cgroup in the cgroup_exit() which is
6636 * called after the exit_mm(). This causes the kthread_stop() to return
6637 * before the kthread actually quits the cgroup.
6638 */
6639 rcu_read_lock();
6640 parent = rcu_dereference(current->real_parent);
6641 get_task_struct(parent);
6642 rcu_read_unlock();
6643 cgroup_attach_task_all(parent, current);
6644 put_task_struct(parent);
6645
6646 return err;
6647 }
6648
6649 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6650 uintptr_t data, const char *name,
6651 struct task_struct **thread_ptr)
6652 {
6653 struct kvm_vm_worker_thread_context init_context = {};
6654 struct task_struct *thread;
6655
6656 *thread_ptr = NULL;
6657 init_context.kvm = kvm;
6658 init_context.parent = current;
6659 init_context.thread_fn = thread_fn;
6660 init_context.data = data;
6661 init_completion(&init_context.init_done);
6662
6663 thread = kthread_run(kvm_vm_worker_thread, &init_context,
6664 "%s-%d", name, task_pid_nr(current));
6665 if (IS_ERR(thread))
6666 return PTR_ERR(thread);
6667
6668 /* kthread_run is never supposed to return NULL */
6669 WARN_ON(thread == NULL);
6670
6671 wait_for_completion(&init_context.init_done);
6672
6673 if (!init_context.err)
6674 *thread_ptr = thread;
6675
6676 return init_context.err;
6677 }
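
/*
 * Hedged usage sketch: a caller spawns a per-VM worker, then unparks it
 * once it is ready for the thread to run.  The thread function and the
 * "worker" pointer are placeholders.
 *
 *	err = kvm_vm_create_worker_thread(kvm, example_worker_fn, 0,
 *					  "kvm-example-worker", &worker);
 *	if (!err && worker)
 *		kthread_unpark(worker);
 */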
6678