1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine (KVM) Hypervisor
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
7 *
8 * Authors:
9 * Avi Kivity <avi@qumranet.com>
10 * Yaniv Kamay <yaniv@qumranet.com>
11 */
12
13 #include <kvm/iodev.h>
14
15 #include <linux/kvm_host.h>
16 #include <linux/kvm.h>
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/percpu.h>
20 #include <linux/mm.h>
21 #include <linux/miscdevice.h>
22 #include <linux/vmalloc.h>
23 #include <linux/reboot.h>
24 #include <linux/debugfs.h>
25 #include <linux/highmem.h>
26 #include <linux/file.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/cpu.h>
29 #include <linux/sched/signal.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/stat.h>
32 #include <linux/cpumask.h>
33 #include <linux/smp.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/profile.h>
36 #include <linux/kvm_para.h>
37 #include <linux/pagemap.h>
38 #include <linux/mman.h>
39 #include <linux/swap.h>
40 #include <linux/bitops.h>
41 #include <linux/spinlock.h>
42 #include <linux/compat.h>
43 #include <linux/srcu.h>
44 #include <linux/hugetlb.h>
45 #include <linux/slab.h>
46 #include <linux/sort.h>
47 #include <linux/bsearch.h>
48 #include <linux/io.h>
49 #include <linux/lockdep.h>
50 #include <linux/kthread.h>
51 #include <linux/suspend.h>
52
53 #include <asm/processor.h>
54 #include <asm/ioctl.h>
55 #include <linux/uaccess.h>
56
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59 #include "kvm_mm.h"
60 #include "vfio.h"
61
62 #include <trace/events/ipi.h>
63
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/kvm.h>
66
67 #include <linux/kvm_dirty_ring.h>
68
69
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72
73 MODULE_AUTHOR("Qumranet");
74 MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
75 MODULE_LICENSE("GPL");
76
77 /* Architectures should define their poll value according to the halt latency */
78 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
79 module_param(halt_poll_ns, uint, 0644);
80 EXPORT_SYMBOL_GPL(halt_poll_ns);
81
82 /* Default doubles per-vcpu halt_poll_ns. */
83 unsigned int halt_poll_ns_grow = 2;
84 module_param(halt_poll_ns_grow, uint, 0644);
85 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
86
87 /* The start value to grow halt_poll_ns from */
88 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
89 module_param(halt_poll_ns_grow_start, uint, 0644);
90 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
91
92 /* Default halves per-vcpu halt_poll_ns. */
93 unsigned int halt_poll_ns_shrink = 2;
94 module_param(halt_poll_ns_shrink, uint, 0644);
95 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
96
97 /*
98 * Ordering of locks:
99 *
100 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
101 */
102
103 DEFINE_MUTEX(kvm_lock);
104 LIST_HEAD(vm_list);
105
106 static struct kmem_cache *kvm_vcpu_cache;
107
108 static __read_mostly struct preempt_ops kvm_preempt_ops;
109 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
110
111 static struct dentry *kvm_debugfs_dir;
112
113 static const struct file_operations stat_fops_per_vm;
114
115 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
116 unsigned long arg);
117 #ifdef CONFIG_KVM_COMPAT
118 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
119 unsigned long arg);
120 #define KVM_COMPAT(c) .compat_ioctl = (c)
121 #else
122 /*
123 * For architectures that don't implement a compat infrastructure,
124 * adopt a double line of defense:
125 * - Prevent a compat task from opening /dev/kvm
126 * - If the open has been done by a 64bit task, and the KVM fd
127 * passed to a compat task, let the ioctls fail.
128 */
129 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
130 unsigned long arg) { return -EINVAL; }
131
132 static int kvm_no_compat_open(struct inode *inode, struct file *file)
133 {
134 return is_compat_task() ? -ENODEV : 0;
135 }
136 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
137 .open = kvm_no_compat_open
138 #endif
139 static int kvm_enable_virtualization(void);
140 static void kvm_disable_virtualization(void);
141
142 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
143
144 #define KVM_EVENT_CREATE_VM 0
145 #define KVM_EVENT_DESTROY_VM 1
146 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
147 static unsigned long long kvm_createvm_count;
148 static unsigned long long kvm_active_vms;
149
150 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
151
152 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
153 {
154 }
155
156 bool kvm_is_zone_device_page(struct page *page)
157 {
158 /*
159 * The metadata used by is_zone_device_page() to determine whether or
160 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
161 * the device has been pinned, e.g. by get_user_pages(). WARN if the
162 * page_count() is zero to help detect bad usage of this helper.
163 */
164 if (WARN_ON_ONCE(!page_count(page)))
165 return false;
166
167 return is_zone_device_page(page);
168 }
169
170 /*
171 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
172 * page, NULL otherwise. Note, the list of refcounted PG_reserved page types
173 * is likely incomplete; it has been compiled purely through people wanting to
174 * back guests with a certain type of memory and encountering issues.
175 */
176 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
177 {
178 struct page *page;
179
180 if (!pfn_valid(pfn))
181 return NULL;
182
183 page = pfn_to_page(pfn);
184 if (!PageReserved(page))
185 return page;
186
187 /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
188 if (is_zero_pfn(pfn))
189 return page;
190
191 /*
192 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
193 * perspective they are "normal" pages, albeit with slightly different
194 * usage rules.
195 */
196 if (kvm_is_zone_device_page(page))
197 return page;
198
199 return NULL;
200 }
201
202 /*
203 * Switches to specified vcpu, until a matching vcpu_put()
204 */
205 void vcpu_load(struct kvm_vcpu *vcpu)
206 {
207 int cpu = get_cpu();
208
209 __this_cpu_write(kvm_running_vcpu, vcpu);
210 preempt_notifier_register(&vcpu->preempt_notifier);
211 kvm_arch_vcpu_load(vcpu, cpu);
212 put_cpu();
213 }
214 EXPORT_SYMBOL_GPL(vcpu_load);
215
216 void vcpu_put(struct kvm_vcpu *vcpu)
217 {
218 preempt_disable();
219 kvm_arch_vcpu_put(vcpu);
220 preempt_notifier_unregister(&vcpu->preempt_notifier);
221 __this_cpu_write(kvm_running_vcpu, NULL);
222 preempt_enable();
223 }
224 EXPORT_SYMBOL_GPL(vcpu_put);
225
226 /* TODO: merge with kvm_arch_vcpu_should_kick */
227 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
228 {
229 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
230
231 /*
232 * We need to wait for the VCPU to reenable interrupts and get out of
233 * READING_SHADOW_PAGE_TABLES mode.
234 */
235 if (req & KVM_REQUEST_WAIT)
236 return mode != OUTSIDE_GUEST_MODE;
237
238 /*
239 * Need to kick a running VCPU, but otherwise there is nothing to do.
240 */
241 return mode == IN_GUEST_MODE;
242 }
243
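/*
 * Empty IPI handler; the interrupt itself is what forces the target CPU out
 * of guest mode, no work is needed in the handler.
 */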
244 static void ack_kick(void *_completed)
245 {
246 }
247
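/*
 * Send an IPI to every CPU in @cpus, optionally waiting for each handler to
 * finish when @wait is true.  Returns false if the mask is empty, i.e. if no
 * kick was actually sent.
 */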
248 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
249 {
250 if (cpumask_empty(cpus))
251 return false;
252
253 smp_call_function_many(cpus, ack_kick, NULL, wait);
254 return true;
255 }
256
257 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
258 struct cpumask *tmp, int current_cpu)
259 {
260 int cpu;
261
262 if (likely(!(req & KVM_REQUEST_NO_ACTION)))
263 __kvm_make_request(req, vcpu);
264
265 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
266 return;
267
268 /*
269 * Note, the vCPU could get migrated to a different pCPU at any point
270 * after kvm_request_needs_ipi(), which could result in sending an IPI
271 * to the previous pCPU. But, that's OK because the purpose of the IPI
272 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
273 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
274 * after this point is also OK, as the requirement is only that KVM wait
275 * for vCPUs that were reading SPTEs _before_ any changes were
276 * finalized. See kvm_vcpu_kick() for more details on handling requests.
277 */
278 if (kvm_request_needs_ipi(vcpu, req)) {
279 cpu = READ_ONCE(vcpu->cpu);
280 if (cpu != -1 && cpu != current_cpu)
281 __cpumask_set_cpu(cpu, tmp);
282 }
283 }
284
285 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
286 unsigned long *vcpu_bitmap)
287 {
288 struct kvm_vcpu *vcpu;
289 struct cpumask *cpus;
290 int i, me;
291 bool called;
292
293 me = get_cpu();
294
295 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
296 cpumask_clear(cpus);
297
298 for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
299 vcpu = kvm_get_vcpu(kvm, i);
300 if (!vcpu)
301 continue;
302 kvm_make_vcpu_request(vcpu, req, cpus, me);
303 }
304
305 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
306 put_cpu();
307
308 return called;
309 }
310
311 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
312 {
313 struct kvm_vcpu *vcpu;
314 struct cpumask *cpus;
315 unsigned long i;
316 bool called;
317 int me;
318
319 me = get_cpu();
320
321 cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
322 cpumask_clear(cpus);
323
324 kvm_for_each_vcpu(i, vcpu, kvm)
325 kvm_make_vcpu_request(vcpu, req, cpus, me);
326
327 called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
328 put_cpu();
329
330 return called;
331 }
332 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
333
334 void kvm_flush_remote_tlbs(struct kvm *kvm)
335 {
336 ++kvm->stat.generic.remote_tlb_flush_requests;
337
338 /*
339 * We want to publish modifications to the page tables before reading
340 * mode. Pairs with a memory barrier in arch-specific code.
341 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
342 * and smp_mb in walk_shadow_page_lockless_begin/end.
343 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
344 *
345 * There is already an smp_mb__after_atomic() before
346 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
347 * barrier here.
348 */
349 if (!kvm_arch_flush_remote_tlbs(kvm)
350 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
351 ++kvm->stat.generic.remote_tlb_flush;
352 }
353 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
354
355 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
356 {
357 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
358 return;
359
360 /*
361 * Fall back to flushing the entire TLB if the architecture's range-based
362 * TLB invalidation is unsupported or can't be performed for whatever
363 * reason.
364 */
365 kvm_flush_remote_tlbs(kvm);
366 }
367
368 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
369 const struct kvm_memory_slot *memslot)
370 {
371 /*
372 * All current use cases for flushing the TLBs for a specific memslot
373 * are related to dirty logging, and many do the TLB flush out of
374 * mmu_lock. The interaction between the various operations on memslot
375 * must be serialized by slots_lock to ensure the TLB flush from one
376 * operation is observed by any other operation on the same memslot.
377 */
378 lockdep_assert_held(&kvm->slots_lock);
379 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
380 }
381
382 static void kvm_flush_shadow_all(struct kvm *kvm)
383 {
384 kvm_arch_flush_shadow_all(kvm);
385 kvm_arch_guest_memory_reclaimed(kvm);
386 }
387
388 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
389 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
390 gfp_t gfp_flags)
391 {
392 void *page;
393
394 gfp_flags |= mc->gfp_zero;
395
396 if (mc->kmem_cache)
397 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
398
399 page = (void *)__get_free_page(gfp_flags);
400 if (page && mc->init_value)
401 memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
402 return page;
403 }
404
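/*
 * Pre-allocate objects into @mc so that later allocations, e.g. while holding
 * mmu_lock, can be satisfied without sleeping.  Returns 0 once at least @min
 * objects are cached; the backing array is sized for @capacity objects and the
 * same capacity must be used on every topup.
 */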
405 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
406 {
407 gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
408 void *obj;
409
410 if (mc->nobjs >= min)
411 return 0;
412
413 if (unlikely(!mc->objects)) {
414 if (WARN_ON_ONCE(!capacity))
415 return -EIO;
416
417 /*
418 * Custom init values can be used only for page allocations,
419 * and obviously conflict with __GFP_ZERO.
420 */
421 if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
422 return -EIO;
423
424 mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
425 if (!mc->objects)
426 return -ENOMEM;
427
428 mc->capacity = capacity;
429 }
430
431 /* It is illegal to request a different capacity across topups. */
432 if (WARN_ON_ONCE(mc->capacity != capacity))
433 return -EIO;
434
435 while (mc->nobjs < mc->capacity) {
436 obj = mmu_memory_cache_alloc_obj(mc, gfp);
437 if (!obj)
438 return mc->nobjs >= min ? 0 : -ENOMEM;
439 mc->objects[mc->nobjs++] = obj;
440 }
441 return 0;
442 }
443
444 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
445 {
446 return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
447 }
448
449 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
450 {
451 return mc->nobjs;
452 }
453
454 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
455 {
456 while (mc->nobjs) {
457 if (mc->kmem_cache)
458 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
459 else
460 free_page((unsigned long)mc->objects[--mc->nobjs]);
461 }
462
463 kvfree(mc->objects);
464
465 mc->objects = NULL;
466 mc->capacity = 0;
467 }
468
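/*
 * Pop a pre-allocated object from @mc.  The cache is expected to have been
 * topped up beforehand; if it is empty, WARN and fall back to an atomic
 * allocation, as this may be called from contexts that cannot sleep.
 */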
469 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
470 {
471 void *p;
472
473 if (WARN_ON(!mc->nobjs))
474 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
475 else
476 p = mc->objects[--mc->nobjs];
477 BUG_ON(!p);
478 return p;
479 }
480 #endif
481
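/* Arch-agnostic initialization common to all newly created vCPUs. */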
482 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
483 {
484 mutex_init(&vcpu->mutex);
485 vcpu->cpu = -1;
486 vcpu->kvm = kvm;
487 vcpu->vcpu_id = id;
488 vcpu->pid = NULL;
489 #ifndef __KVM_HAVE_ARCH_WQP
490 rcuwait_init(&vcpu->wait);
491 #endif
492 kvm_async_pf_vcpu_init(vcpu);
493
494 kvm_vcpu_set_in_spin_loop(vcpu, false);
495 kvm_vcpu_set_dy_eligible(vcpu, false);
496 vcpu->preempted = false;
497 vcpu->ready = false;
498 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
499 vcpu->last_used_slot = NULL;
500
501 /* Fill the stats id string for the vcpu */
502 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
503 task_pid_nr(current), id);
504 }
505
506 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
507 {
508 kvm_arch_vcpu_destroy(vcpu);
509 kvm_dirty_ring_free(&vcpu->dirty_ring);
510
511 /*
512 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
513 * the vcpu->pid pointer, and at destruction time all file descriptors
514 * are already gone.
515 */
516 put_pid(rcu_dereference_protected(vcpu->pid, 1));
517
518 free_page((unsigned long)vcpu->run);
519 kmem_cache_free(kvm_vcpu_cache, vcpu);
520 }
521
522 void kvm_destroy_vcpus(struct kvm *kvm)
523 {
524 unsigned long i;
525 struct kvm_vcpu *vcpu;
526
527 kvm_for_each_vcpu(i, vcpu, kvm) {
528 kvm_vcpu_destroy(vcpu);
529 xa_erase(&kvm->vcpu_array, i);
530 }
531
532 atomic_set(&kvm->online_vcpus, 0);
533 }
534 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
535
536 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
537 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
538 {
539 return container_of(mn, struct kvm, mmu_notifier);
540 }
541
542 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
543
544 typedef void (*on_lock_fn_t)(struct kvm *kvm);
545
546 struct kvm_mmu_notifier_range {
547 /*
548 * 64-bit addresses, as KVM notifiers can operate on host virtual
549 * addresses (unsigned long) and guest physical addresses (64-bit).
550 */
551 u64 start;
552 u64 end;
553 union kvm_mmu_notifier_arg arg;
554 gfn_handler_t handler;
555 on_lock_fn_t on_lock;
556 bool flush_on_ret;
557 bool may_block;
558 };
559
560 /*
561 * The inner-most helper returns a tuple containing the return value from the
562 * arch- and action-specific handler, plus a flag indicating whether or not at
563 * least one memslot was found, i.e. if the handler found guest memory.
564 *
565 * Note, most notifiers are averse to booleans, so even though KVM tracks the
566 * return from arch code as a bool, outer helpers will cast it to an int. :-(
567 */
568 typedef struct kvm_mmu_notifier_return {
569 bool ret;
570 bool found_memslot;
571 } kvm_mn_ret_t;
572
573 /*
574 * Use a dedicated stub instead of NULL to indicate that there is no callback
575 * function/handler. The compiler technically can't guarantee that a real
576 * function will have a non-zero address, and so it will generate code to
577 * check for !NULL, whereas comparing against a stub will be elided at compile
578 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
579 */
580 static void kvm_null_fn(void)
581 {
582
583 }
584 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
585
586 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
587 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
588 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
589 node; \
590 node = interval_tree_iter_next(node, start, last)) \
591
592 static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
593 const struct kvm_mmu_notifier_range *range)
594 {
595 struct kvm_mmu_notifier_return r = {
596 .ret = false,
597 .found_memslot = false,
598 };
599 struct kvm_gfn_range gfn_range;
600 struct kvm_memory_slot *slot;
601 struct kvm_memslots *slots;
602 int i, idx;
603
604 if (WARN_ON_ONCE(range->end <= range->start))
605 return r;
606
607 /* A null handler is allowed if and only if on_lock() is provided. */
608 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
609 IS_KVM_NULL_FN(range->handler)))
610 return r;
611
612 idx = srcu_read_lock(&kvm->srcu);
613
614 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
615 struct interval_tree_node *node;
616
617 slots = __kvm_memslots(kvm, i);
618 kvm_for_each_memslot_in_hva_range(node, slots,
619 range->start, range->end - 1) {
620 unsigned long hva_start, hva_end;
621
622 slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
623 hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
624 hva_end = min_t(unsigned long, range->end,
625 slot->userspace_addr + (slot->npages << PAGE_SHIFT));
626
627 /*
628 * To optimize for the likely case where the address
629 * range is covered by zero or one memslots, don't
630 * bother making these conditional (to avoid writes on
631 * the second or later invocation of the handler).
632 */
633 gfn_range.arg = range->arg;
634 gfn_range.may_block = range->may_block;
635
636 /*
637 * {gfn(page) | page intersects with [hva_start, hva_end)} =
638 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
639 */
640 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
641 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
642 gfn_range.slot = slot;
643
644 if (!r.found_memslot) {
645 r.found_memslot = true;
646 KVM_MMU_LOCK(kvm);
647 if (!IS_KVM_NULL_FN(range->on_lock))
648 range->on_lock(kvm);
649
650 if (IS_KVM_NULL_FN(range->handler))
651 goto mmu_unlock;
652 }
653 r.ret |= range->handler(kvm, &gfn_range);
654 }
655 }
656
657 if (range->flush_on_ret && r.ret)
658 kvm_flush_remote_tlbs(kvm);
659
660 mmu_unlock:
661 if (r.found_memslot)
662 KVM_MMU_UNLOCK(kvm);
663
664 srcu_read_unlock(&kvm->srcu, idx);
665
666 return r;
667 }
668
669 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
670 unsigned long start,
671 unsigned long end,
672 gfn_handler_t handler)
673 {
674 struct kvm *kvm = mmu_notifier_to_kvm(mn);
675 const struct kvm_mmu_notifier_range range = {
676 .start = start,
677 .end = end,
678 .handler = handler,
679 .on_lock = (void *)kvm_null_fn,
680 .flush_on_ret = true,
681 .may_block = false,
682 };
683
684 return __kvm_handle_hva_range(kvm, &range).ret;
685 }
686
687 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
688 unsigned long start,
689 unsigned long end,
690 gfn_handler_t handler)
691 {
692 struct kvm *kvm = mmu_notifier_to_kvm(mn);
693 const struct kvm_mmu_notifier_range range = {
694 .start = start,
695 .end = end,
696 .handler = handler,
697 .on_lock = (void *)kvm_null_fn,
698 .flush_on_ret = false,
699 .may_block = false,
700 };
701
702 return __kvm_handle_hva_range(kvm, &range).ret;
703 }
704
705 void kvm_mmu_invalidate_begin(struct kvm *kvm)
706 {
707 lockdep_assert_held_write(&kvm->mmu_lock);
708 /*
709 * The count increase must become visible at unlock time as no
710 * spte can be established without taking the mmu_lock and
711 * count is also read inside the mmu_lock critical section.
712 */
713 kvm->mmu_invalidate_in_progress++;
714
715 if (likely(kvm->mmu_invalidate_in_progress == 1)) {
716 kvm->mmu_invalidate_range_start = INVALID_GPA;
717 kvm->mmu_invalidate_range_end = INVALID_GPA;
718 }
719 }
720
721 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
722 {
723 lockdep_assert_held_write(&kvm->mmu_lock);
724
725 WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
726
727 if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
728 kvm->mmu_invalidate_range_start = start;
729 kvm->mmu_invalidate_range_end = end;
730 } else {
731 /*
732 * Fully tracking multiple concurrent ranges has diminishing
733 * returns. Keep things simple and just find the minimal range
734 * which includes the current and new ranges. As there won't be
735 * enough information to subtract a range after its invalidate
736 * completes, any ranges invalidated concurrently will
737 * accumulate and persist until all outstanding invalidates
738 * complete.
739 */
740 kvm->mmu_invalidate_range_start =
741 min(kvm->mmu_invalidate_range_start, start);
742 kvm->mmu_invalidate_range_end =
743 max(kvm->mmu_invalidate_range_end, end);
744 }
745 }
746
747 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
748 {
749 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
750 return kvm_unmap_gfn_range(kvm, range);
751 }
752
753 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
754 const struct mmu_notifier_range *range)
755 {
756 struct kvm *kvm = mmu_notifier_to_kvm(mn);
757 const struct kvm_mmu_notifier_range hva_range = {
758 .start = range->start,
759 .end = range->end,
760 .handler = kvm_mmu_unmap_gfn_range,
761 .on_lock = kvm_mmu_invalidate_begin,
762 .flush_on_ret = true,
763 .may_block = mmu_notifier_range_blockable(range),
764 };
765
766 trace_kvm_unmap_hva_range(range->start, range->end);
767
768 /*
769 * Prevent memslot modification between range_start() and range_end()
770 * so that conditionally locking provides the same result in both
771 * functions. Without that guarantee, the mmu_invalidate_in_progress
772 * adjustments will be imbalanced.
773 *
774 * Pairs with the decrement in range_end().
775 */
776 spin_lock(&kvm->mn_invalidate_lock);
777 kvm->mn_active_invalidate_count++;
778 spin_unlock(&kvm->mn_invalidate_lock);
779
780 /*
781 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
782 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
783 * each cache's lock. There are relatively few caches in existence at
784 * any given time, and the caches themselves can check for hva overlap,
785 * i.e. don't need to rely on memslot overlap checks for performance.
786 * Because this runs without holding mmu_lock, the pfn caches must use
787 * mn_active_invalidate_count (see above) instead of
788 * mmu_invalidate_in_progress.
789 */
790 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
791
792 /*
793 * If one or more memslots were found and thus zapped, notify arch code
794 * that guest memory has been reclaimed. This needs to be done *after*
795 * dropping mmu_lock, as x86's reclaim path is slooooow.
796 */
797 if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
798 kvm_arch_guest_memory_reclaimed(kvm);
799
800 return 0;
801 }
802
803 void kvm_mmu_invalidate_end(struct kvm *kvm)
804 {
805 lockdep_assert_held_write(&kvm->mmu_lock);
806
807 /*
808 * This sequence increase will notify the kvm page fault that
809 * the page that is going to be mapped in the spte could have
810 * been freed.
811 */
812 kvm->mmu_invalidate_seq++;
813 smp_wmb();
814 /*
815 * The above sequence increase must be visible before the
816 * below count decrease, which is ensured by the smp_wmb above
817 * in conjunction with the smp_rmb in mmu_invalidate_retry().
818 */
819 kvm->mmu_invalidate_in_progress--;
820 KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
821
822 /*
823 * Assert that at least one range was added between start() and end().
824 * Not adding a range isn't fatal, but it is a KVM bug.
825 */
826 WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
827 }
828
829 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
830 const struct mmu_notifier_range *range)
831 {
832 struct kvm *kvm = mmu_notifier_to_kvm(mn);
833 const struct kvm_mmu_notifier_range hva_range = {
834 .start = range->start,
835 .end = range->end,
836 .handler = (void *)kvm_null_fn,
837 .on_lock = kvm_mmu_invalidate_end,
838 .flush_on_ret = false,
839 .may_block = mmu_notifier_range_blockable(range),
840 };
841 bool wake;
842
843 __kvm_handle_hva_range(kvm, &hva_range);
844
845 /* Pairs with the increment in range_start(). */
846 spin_lock(&kvm->mn_invalidate_lock);
847 if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
848 --kvm->mn_active_invalidate_count;
849 wake = !kvm->mn_active_invalidate_count;
850 spin_unlock(&kvm->mn_invalidate_lock);
851
852 /*
853 * There can only be one waiter, since the wait happens under
854 * slots_lock.
855 */
856 if (wake)
857 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
858 }
859
860 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
861 struct mm_struct *mm,
862 unsigned long start,
863 unsigned long end)
864 {
865 trace_kvm_age_hva(start, end);
866
867 return kvm_handle_hva_range(mn, start, end, kvm_age_gfn);
868 }
869
870 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
871 struct mm_struct *mm,
872 unsigned long start,
873 unsigned long end)
874 {
875 trace_kvm_age_hva(start, end);
876
877 /*
878 * Even though we do not flush TLB, this will still adversely
879 * affect performance on pre-Haswell Intel EPT, where there is
880 * no EPT Access Bit to clear so that we have to tear down EPT
881 * tables instead. If we find this unacceptable, we can always
882 * add a parameter to kvm_age_hva so that it effectively doesn't
883 * do anything on clear_young.
884 *
885 * Also note that currently we never issue secondary TLB flushes
886 * from clear_young, leaving this job up to the regular system
887 * cadence. If we find this inaccurate, we might come up with a
888 * more sophisticated heuristic later.
889 */
890 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
891 }
892
893 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
894 struct mm_struct *mm,
895 unsigned long address)
896 {
897 trace_kvm_test_age_hva(address);
898
899 return kvm_handle_hva_range_no_flush(mn, address, address + 1,
900 kvm_test_age_gfn);
901 }
902
903 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
904 struct mm_struct *mm)
905 {
906 struct kvm *kvm = mmu_notifier_to_kvm(mn);
907 int idx;
908
909 idx = srcu_read_lock(&kvm->srcu);
910 kvm_flush_shadow_all(kvm);
911 srcu_read_unlock(&kvm->srcu, idx);
912 }
913
914 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
915 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
916 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
917 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
918 .clear_young = kvm_mmu_notifier_clear_young,
919 .test_young = kvm_mmu_notifier_test_young,
920 .release = kvm_mmu_notifier_release,
921 };
922
923 static int kvm_init_mmu_notifier(struct kvm *kvm)
924 {
925 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
926 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
927 }
928
929 #else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
930
931 static int kvm_init_mmu_notifier(struct kvm *kvm)
932 {
933 return 0;
934 }
935
936 #endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
937
938 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
939 static int kvm_pm_notifier_call(struct notifier_block *bl,
940 unsigned long state,
941 void *unused)
942 {
943 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
944
945 return kvm_arch_pm_notifier(kvm, state);
946 }
947
948 static void kvm_init_pm_notifier(struct kvm *kvm)
949 {
950 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
951 /* Suspend KVM before we suspend ftrace, RCU, etc. */
952 kvm->pm_notifier.priority = INT_MAX;
953 register_pm_notifier(&kvm->pm_notifier);
954 }
955
956 static void kvm_destroy_pm_notifier(struct kvm *kvm)
957 {
958 unregister_pm_notifier(&kvm->pm_notifier);
959 }
960 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
961 static void kvm_init_pm_notifier(struct kvm *kvm)
962 {
963 }
964
965 static void kvm_destroy_pm_notifier(struct kvm *kvm)
966 {
967 }
968 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
969
970 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
971 {
972 if (!memslot->dirty_bitmap)
973 return;
974
975 vfree(memslot->dirty_bitmap);
976 memslot->dirty_bitmap = NULL;
977 }
978
979 /* This does not remove the slot from struct kvm_memslots data structures */
980 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
981 {
982 if (slot->flags & KVM_MEM_GUEST_MEMFD)
983 kvm_gmem_unbind(slot);
984
985 kvm_destroy_dirty_bitmap(slot);
986
987 kvm_arch_free_memslot(kvm, slot);
988
989 kfree(slot);
990 }
991
992 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
993 {
994 struct hlist_node *idnode;
995 struct kvm_memory_slot *memslot;
996 int bkt;
997
998 /*
999 * The same memslot objects live in both active and inactive sets,
1000 * arbitrarily free them using index '1' so that the second invocation of this
1001 * function isn't operating over a structure with dangling pointers
1002 * (even though this function isn't actually touching them).
1003 */
1004 if (!slots->node_idx)
1005 return;
1006
1007 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1008 kvm_free_memslot(kvm, memslot);
1009 }
1010
1011 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
1012 {
1013 switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
1014 case KVM_STATS_TYPE_INSTANT:
1015 return 0444;
1016 case KVM_STATS_TYPE_CUMULATIVE:
1017 case KVM_STATS_TYPE_PEAK:
1018 default:
1019 return 0644;
1020 }
1021 }
1022
1023
1024 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1025 {
1026 int i;
1027 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1028 kvm_vcpu_stats_header.num_desc;
1029
1030 if (IS_ERR(kvm->debugfs_dentry))
1031 return;
1032
1033 debugfs_remove_recursive(kvm->debugfs_dentry);
1034
1035 if (kvm->debugfs_stat_data) {
1036 for (i = 0; i < kvm_debugfs_num_entries; i++)
1037 kfree(kvm->debugfs_stat_data[i]);
1038 kfree(kvm->debugfs_stat_data);
1039 }
1040 }
1041
1042 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1043 {
1044 static DEFINE_MUTEX(kvm_debugfs_lock);
1045 struct dentry *dent;
1046 char dir_name[ITOA_MAX_LEN * 2];
1047 struct kvm_stat_data *stat_data;
1048 const struct _kvm_stats_desc *pdesc;
1049 int i, ret = -ENOMEM;
1050 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1051 kvm_vcpu_stats_header.num_desc;
1052
1053 if (!debugfs_initialized())
1054 return 0;
1055
1056 snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1057 mutex_lock(&kvm_debugfs_lock);
1058 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1059 if (dent) {
1060 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1061 dput(dent);
1062 mutex_unlock(&kvm_debugfs_lock);
1063 return 0;
1064 }
1065 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1066 mutex_unlock(&kvm_debugfs_lock);
1067 if (IS_ERR(dent))
1068 return 0;
1069
1070 kvm->debugfs_dentry = dent;
1071 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1072 sizeof(*kvm->debugfs_stat_data),
1073 GFP_KERNEL_ACCOUNT);
1074 if (!kvm->debugfs_stat_data)
1075 goto out_err;
1076
1077 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1078 pdesc = &kvm_vm_stats_desc[i];
1079 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1080 if (!stat_data)
1081 goto out_err;
1082
1083 stat_data->kvm = kvm;
1084 stat_data->desc = pdesc;
1085 stat_data->kind = KVM_STAT_VM;
1086 kvm->debugfs_stat_data[i] = stat_data;
1087 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1088 kvm->debugfs_dentry, stat_data,
1089 &stat_fops_per_vm);
1090 }
1091
1092 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1093 pdesc = &kvm_vcpu_stats_desc[i];
1094 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1095 if (!stat_data)
1096 goto out_err;
1097
1098 stat_data->kvm = kvm;
1099 stat_data->desc = pdesc;
1100 stat_data->kind = KVM_STAT_VCPU;
1101 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1102 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1103 kvm->debugfs_dentry, stat_data,
1104 &stat_fops_per_vm);
1105 }
1106
1107 kvm_arch_create_vm_debugfs(kvm);
1108 return 0;
1109 out_err:
1110 kvm_destroy_vm_debugfs(kvm);
1111 return ret;
1112 }
1113
1114 /*
1115 * Called after the VM is otherwise initialized, but just before adding it to
1116 * the vm_list.
1117 */
1118 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1119 {
1120 return 0;
1121 }
1122
1123 /*
1124 * Called just after removing the VM from the vm_list, but before doing any
1125 * other destruction.
1126 */
1127 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1128 {
1129 }
1130
1131 /*
1132 * Called after the per-VM debugfs directory has been created. When called,
1133 * kvm->debugfs_dentry should already be set up, so arch-specific debugfs entries
1134 * can be created under it. Cleanup is handled automatically and recursively by
1135 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
1136 */
1137 void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1138 {
1139 }
1140
1141 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1142 {
1143 struct kvm *kvm = kvm_arch_alloc_vm();
1144 struct kvm_memslots *slots;
1145 int r, i, j;
1146
1147 if (!kvm)
1148 return ERR_PTR(-ENOMEM);
1149
1150 KVM_MMU_LOCK_INIT(kvm);
1151 mmgrab(current->mm);
1152 kvm->mm = current->mm;
1153 kvm_eventfd_init(kvm);
1154 mutex_init(&kvm->lock);
1155 mutex_init(&kvm->irq_lock);
1156 mutex_init(&kvm->slots_lock);
1157 mutex_init(&kvm->slots_arch_lock);
1158 spin_lock_init(&kvm->mn_invalidate_lock);
1159 rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1160 xa_init(&kvm->vcpu_array);
1161 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1162 xa_init(&kvm->mem_attr_array);
1163 #endif
1164
1165 INIT_LIST_HEAD(&kvm->gpc_list);
1166 spin_lock_init(&kvm->gpc_lock);
1167
1168 INIT_LIST_HEAD(&kvm->devices);
1169 kvm->max_vcpus = KVM_MAX_VCPUS;
1170
1171 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1172
1173 /*
1174 * Force subsequent debugfs file creations to fail if the VM directory
1175 * is not created (by kvm_create_vm_debugfs()).
1176 */
1177 kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1178
1179 snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1180 task_pid_nr(current));
1181
1182 r = -ENOMEM;
1183 if (init_srcu_struct(&kvm->srcu))
1184 goto out_err_no_srcu;
1185 if (init_srcu_struct(&kvm->irq_srcu))
1186 goto out_err_no_irq_srcu;
1187
1188 r = kvm_init_irq_routing(kvm);
1189 if (r)
1190 goto out_err_no_irq_routing;
1191
1192 refcount_set(&kvm->users_count, 1);
1193
1194 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1195 for (j = 0; j < 2; j++) {
1196 slots = &kvm->__memslots[i][j];
1197
1198 atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1199 slots->hva_tree = RB_ROOT_CACHED;
1200 slots->gfn_tree = RB_ROOT;
1201 hash_init(slots->id_hash);
1202 slots->node_idx = j;
1203
1204 /* Generations must be different for each address space. */
1205 slots->generation = i;
1206 }
1207
1208 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1209 }
1210
1211 r = -ENOMEM;
1212 for (i = 0; i < KVM_NR_BUSES; i++) {
1213 rcu_assign_pointer(kvm->buses[i],
1214 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1215 if (!kvm->buses[i])
1216 goto out_err_no_arch_destroy_vm;
1217 }
1218
1219 r = kvm_arch_init_vm(kvm, type);
1220 if (r)
1221 goto out_err_no_arch_destroy_vm;
1222
1223 r = kvm_enable_virtualization();
1224 if (r)
1225 goto out_err_no_disable;
1226
1227 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1228 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1229 #endif
1230
1231 r = kvm_init_mmu_notifier(kvm);
1232 if (r)
1233 goto out_err_no_mmu_notifier;
1234
1235 r = kvm_coalesced_mmio_init(kvm);
1236 if (r < 0)
1237 goto out_no_coalesced_mmio;
1238
1239 r = kvm_create_vm_debugfs(kvm, fdname);
1240 if (r)
1241 goto out_err_no_debugfs;
1242
1243 r = kvm_arch_post_init_vm(kvm);
1244 if (r)
1245 goto out_err;
1246
1247 mutex_lock(&kvm_lock);
1248 list_add(&kvm->vm_list, &vm_list);
1249 mutex_unlock(&kvm_lock);
1250
1251 preempt_notifier_inc();
1252 kvm_init_pm_notifier(kvm);
1253
1254 return kvm;
1255
1256 out_err:
1257 kvm_destroy_vm_debugfs(kvm);
1258 out_err_no_debugfs:
1259 kvm_coalesced_mmio_free(kvm);
1260 out_no_coalesced_mmio:
1261 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1262 if (kvm->mmu_notifier.ops)
1263 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1264 #endif
1265 out_err_no_mmu_notifier:
1266 kvm_disable_virtualization();
1267 out_err_no_disable:
1268 kvm_arch_destroy_vm(kvm);
1269 out_err_no_arch_destroy_vm:
1270 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1271 for (i = 0; i < KVM_NR_BUSES; i++)
1272 kfree(kvm_get_bus(kvm, i));
1273 kvm_free_irq_routing(kvm);
1274 out_err_no_irq_routing:
1275 cleanup_srcu_struct(&kvm->irq_srcu);
1276 out_err_no_irq_srcu:
1277 cleanup_srcu_struct(&kvm->srcu);
1278 out_err_no_srcu:
1279 kvm_arch_free_vm(kvm);
1280 mmdrop(current->mm);
1281 return ERR_PTR(r);
1282 }
1283
1284 static void kvm_destroy_devices(struct kvm *kvm)
1285 {
1286 struct kvm_device *dev, *tmp;
1287
1288 /*
1289 * We do not need to take the kvm->lock here, because nobody else
1290 * has a reference to the struct kvm at this point and therefore
1291 * cannot access the devices list anyhow.
1292 *
1293 * The device list is generally managed as an rculist, but list_del()
1294 * is used intentionally here. If a bug in KVM introduced a reader that
1295 * was not backed by a reference on the kvm struct, the hope is that
1296 * it'd consume the poisoned forward pointer instead of suffering a
1297 * use-after-free, even though this cannot be guaranteed.
1298 */
1299 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1300 list_del(&dev->vm_node);
1301 dev->ops->destroy(dev);
1302 }
1303 }
1304
1305 static void kvm_destroy_vm(struct kvm *kvm)
1306 {
1307 int i;
1308 struct mm_struct *mm = kvm->mm;
1309
1310 kvm_destroy_pm_notifier(kvm);
1311 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1312 kvm_destroy_vm_debugfs(kvm);
1313 kvm_arch_sync_events(kvm);
1314 mutex_lock(&kvm_lock);
1315 list_del(&kvm->vm_list);
1316 mutex_unlock(&kvm_lock);
1317 kvm_arch_pre_destroy_vm(kvm);
1318
1319 kvm_free_irq_routing(kvm);
1320 for (i = 0; i < KVM_NR_BUSES; i++) {
1321 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1322
1323 if (bus)
1324 kvm_io_bus_destroy(bus);
1325 kvm->buses[i] = NULL;
1326 }
1327 kvm_coalesced_mmio_free(kvm);
1328 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1329 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1330 /*
1331 * At this point, pending calls to invalidate_range_start()
1332 * have completed but no more MMU notifiers will run, so
1333 * mn_active_invalidate_count may remain unbalanced.
1334 * No threads can be waiting in kvm_swap_active_memslots() as the
1335 * last reference on KVM has been dropped, but freeing
1336 * memslots would deadlock without this manual intervention.
1337 *
1338 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1339 * notifier between a start() and end(), then there shouldn't be any
1340 * in-progress invalidations.
1341 */
1342 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1343 if (kvm->mn_active_invalidate_count)
1344 kvm->mn_active_invalidate_count = 0;
1345 else
1346 WARN_ON(kvm->mmu_invalidate_in_progress);
1347 #else
1348 kvm_flush_shadow_all(kvm);
1349 #endif
1350 kvm_arch_destroy_vm(kvm);
1351 kvm_destroy_devices(kvm);
1352 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1353 kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1354 kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1355 }
1356 cleanup_srcu_struct(&kvm->irq_srcu);
1357 cleanup_srcu_struct(&kvm->srcu);
1358 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1359 xa_destroy(&kvm->mem_attr_array);
1360 #endif
1361 kvm_arch_free_vm(kvm);
1362 preempt_notifier_dec();
1363 kvm_disable_virtualization();
1364 mmdrop(mm);
1365 }
1366
1367 void kvm_get_kvm(struct kvm *kvm)
1368 {
1369 refcount_inc(&kvm->users_count);
1370 }
1371 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1372
1373 /*
1374 * Make sure the VM is not in the middle of destruction; this is a safe version
1375 * of kvm_get_kvm(). Return true if kvm was referenced successfully, false otherwise.
1376 */
1377 bool kvm_get_kvm_safe(struct kvm *kvm)
1378 {
1379 return refcount_inc_not_zero(&kvm->users_count);
1380 }
1381 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
1382
1383 void kvm_put_kvm(struct kvm *kvm)
1384 {
1385 if (refcount_dec_and_test(&kvm->users_count))
1386 kvm_destroy_vm(kvm);
1387 }
1388 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1389
1390 /*
1391 * Used to put a reference that was taken on behalf of an object associated
1392 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1393 * of the new file descriptor fails and the reference cannot be transferred to
1394 * its final owner. In such cases, the caller is still actively using @kvm and
1395 * will fail miserably if the refcount unexpectedly hits zero.
1396 */
1397 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1398 {
1399 WARN_ON(refcount_dec_and_test(&kvm->users_count));
1400 }
1401 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1402
1403 static int kvm_vm_release(struct inode *inode, struct file *filp)
1404 {
1405 struct kvm *kvm = filp->private_data;
1406
1407 kvm_irqfd_release(kvm);
1408
1409 kvm_put_kvm(kvm);
1410 return 0;
1411 }
1412
1413 /*
1414 * Allocation size is twice as large as the actual dirty bitmap size.
1415 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
1416 */
1417 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1418 {
1419 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1420
1421 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1422 if (!memslot->dirty_bitmap)
1423 return -ENOMEM;
1424
1425 return 0;
1426 }
1427
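/*
 * Each address space has two memslot sets that are swapped on update; the
 * inactive set is the one whose node_idx differs from the active set's.
 */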
1428 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1429 {
1430 struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1431 int node_idx_inactive = active->node_idx ^ 1;
1432
1433 return &kvm->__memslots[as_id][node_idx_inactive];
1434 }
1435
1436 /*
1437 * Helper to get the address space ID when one of memslot pointers may be NULL.
1438 * This also serves as a sanity check that at least one of the pointers is non-NULL,
1439 * and that their address space IDs don't diverge.
1440 */
1441 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1442 struct kvm_memory_slot *b)
1443 {
1444 if (WARN_ON_ONCE(!a && !b))
1445 return 0;
1446
1447 if (!a)
1448 return b->as_id;
1449 if (!b)
1450 return a->as_id;
1451
1452 WARN_ON_ONCE(a->as_id != b->as_id);
1453 return a->as_id;
1454 }
1455
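/* Insert @slot into the gfn-ordered tree; a duplicate base_gfn is a bug. */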
1456 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1457 struct kvm_memory_slot *slot)
1458 {
1459 struct rb_root *gfn_tree = &slots->gfn_tree;
1460 struct rb_node **node, *parent;
1461 int idx = slots->node_idx;
1462
1463 parent = NULL;
1464 for (node = &gfn_tree->rb_node; *node; ) {
1465 struct kvm_memory_slot *tmp;
1466
1467 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1468 parent = *node;
1469 if (slot->base_gfn < tmp->base_gfn)
1470 node = &(*node)->rb_left;
1471 else if (slot->base_gfn > tmp->base_gfn)
1472 node = &(*node)->rb_right;
1473 else
1474 BUG();
1475 }
1476
1477 rb_link_node(&slot->gfn_node[idx], parent, node);
1478 rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1479 }
1480
1481 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1482 struct kvm_memory_slot *slot)
1483 {
1484 rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1485 }
1486
1487 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1488 struct kvm_memory_slot *old,
1489 struct kvm_memory_slot *new)
1490 {
1491 int idx = slots->node_idx;
1492
1493 WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1494
1495 rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1496 &slots->gfn_tree);
1497 }
1498
1499 /*
1500 * Replace @old with @new in the inactive memslots.
1501 *
1502 * With NULL @old this simply adds @new.
1503 * With NULL @new this simply removes @old.
1504 *
1505 * If @new is non-NULL its hva_node[slots_idx] range has to be set
1506 * appropriately.
1507 */
1508 static void kvm_replace_memslot(struct kvm *kvm,
1509 struct kvm_memory_slot *old,
1510 struct kvm_memory_slot *new)
1511 {
1512 int as_id = kvm_memslots_get_as_id(old, new);
1513 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1514 int idx = slots->node_idx;
1515
1516 if (old) {
1517 hash_del(&old->id_node[idx]);
1518 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1519
1520 if ((long)old == atomic_long_read(&slots->last_used_slot))
1521 atomic_long_set(&slots->last_used_slot, (long)new);
1522
1523 if (!new) {
1524 kvm_erase_gfn_node(slots, old);
1525 return;
1526 }
1527 }
1528
1529 /*
1530 * Initialize @new's hva range. Do this even when replacing an @old
1531 * slot, kvm_copy_memslot() deliberately does not touch node data.
1532 */
1533 new->hva_node[idx].start = new->userspace_addr;
1534 new->hva_node[idx].last = new->userspace_addr +
1535 (new->npages << PAGE_SHIFT) - 1;
1536
1537 /*
1538 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), so the
1539 * hva_node needs to be swapped with remove+insert even though hva can't
1540 * change when replacing an existing slot.
1541 */
1542 hash_add(slots->id_hash, &new->id_node[idx], new->id);
1543 interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1544
1545 /*
1546 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1547 * switch the node in the gfn tree instead of removing the old and
1548 * inserting the new as two separate operations. Replacement is a
1549 * single O(1) operation versus two O(log(n)) operations for
1550 * remove+insert.
1551 */
1552 if (old && old->base_gfn == new->base_gfn) {
1553 kvm_replace_gfn_node(slots, old, new);
1554 } else {
1555 if (old)
1556 kvm_erase_gfn_node(slots, old);
1557 kvm_insert_gfn_node(slots, new);
1558 }
1559 }
1560
1561 /*
1562 * Flags that do not access any of the extra space of struct
1563 * kvm_userspace_memory_region2. KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1564 * only allows these.
1565 */
1566 #define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1567 (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1568
1569 static int check_memory_region_flags(struct kvm *kvm,
1570 const struct kvm_userspace_memory_region2 *mem)
1571 {
1572 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1573
1574 if (kvm_arch_has_private_mem(kvm))
1575 valid_flags |= KVM_MEM_GUEST_MEMFD;
1576
1577 /* Dirty logging private memory is not currently supported. */
1578 if (mem->flags & KVM_MEM_GUEST_MEMFD)
1579 valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1580
1581 /*
1582 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1583 * read-only memslots have emulated MMIO, not page fault, semantics,
1584 * and KVM doesn't allow emulated MMIO for private memory.
1585 */
1586 if (kvm_arch_has_readonly_mem(kvm) &&
1587 !(mem->flags & KVM_MEM_GUEST_MEMFD))
1588 valid_flags |= KVM_MEM_READONLY;
1589
1590 if (mem->flags & ~valid_flags)
1591 return -EINVAL;
1592
1593 return 0;
1594 }
1595
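/*
 * Swap the active and inactive memslot sets for @as_id, waiting for in-flight
 * mmu_notifier invalidations to drain before publishing the new set, and for
 * all SRCU readers of the old set to finish before returning.
 */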
1596 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1597 {
1598 struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1599
1600 /* Grab the generation from the active memslots. */
1601 u64 gen = __kvm_memslots(kvm, as_id)->generation;
1602
1603 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1604 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1605
1606 /*
1607 * Do not store the new memslots while there are invalidations in
1608 * progress, otherwise the locking in invalidate_range_start and
1609 * invalidate_range_end will be unbalanced.
1610 */
1611 spin_lock(&kvm->mn_invalidate_lock);
1612 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1613 while (kvm->mn_active_invalidate_count) {
1614 set_current_state(TASK_UNINTERRUPTIBLE);
1615 spin_unlock(&kvm->mn_invalidate_lock);
1616 schedule();
1617 spin_lock(&kvm->mn_invalidate_lock);
1618 }
1619 finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1620 rcu_assign_pointer(kvm->memslots[as_id], slots);
1621 spin_unlock(&kvm->mn_invalidate_lock);
1622
1623 /*
1624 * Acquired in kvm_set_memslot. Must be released before synchronize
1625 * SRCU below in order to avoid deadlock with another thread
1626 * acquiring the slots_arch_lock in an srcu critical section.
1627 */
1628 mutex_unlock(&kvm->slots_arch_lock);
1629
1630 synchronize_srcu_expedited(&kvm->srcu);
1631
1632 /*
1633 * Increment the new memslot generation a second time, dropping the
1634 * update in-progress flag and incrementing the generation based on
1635 * the number of address spaces. This provides a unique and easily
1636 * identifiable generation number while the memslots are in flux.
1637 */
1638 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1639
1640 /*
1641 * Generations must be unique even across address spaces. We do not need
1642 * a global counter for that, instead the generation space is evenly split
1643 * across address spaces. For example, with two address spaces, address
1644 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1645 * use generations 1, 3, 5, ...
1646 */
1647 gen += kvm_arch_nr_memslot_as_ids(kvm);
1648
1649 kvm_arch_memslots_updated(kvm, gen);
1650
1651 slots->generation = gen;
1652 }
1653
1654 static int kvm_prepare_memory_region(struct kvm *kvm,
1655 const struct kvm_memory_slot *old,
1656 struct kvm_memory_slot *new,
1657 enum kvm_mr_change change)
1658 {
1659 int r;
1660
1661 /*
1662 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1663 * will be freed on "commit". If logging is enabled in both old and
1664 * new, reuse the existing bitmap. If logging is enabled only in the
1665 * new and KVM isn't using a ring buffer, allocate and initialize a
1666 * new bitmap.
1667 */
1668 if (change != KVM_MR_DELETE) {
1669 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1670 new->dirty_bitmap = NULL;
1671 else if (old && old->dirty_bitmap)
1672 new->dirty_bitmap = old->dirty_bitmap;
1673 else if (kvm_use_dirty_bitmap(kvm)) {
1674 r = kvm_alloc_dirty_bitmap(new);
1675 if (r)
1676 return r;
1677
1678 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1679 bitmap_set(new->dirty_bitmap, 0, new->npages);
1680 }
1681 }
1682
1683 r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1684
1685 /* Free the bitmap on failure if it was allocated above. */
1686 if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1687 kvm_destroy_dirty_bitmap(new);
1688
1689 return r;
1690 }
1691
1692 static void kvm_commit_memory_region(struct kvm *kvm,
1693 struct kvm_memory_slot *old,
1694 const struct kvm_memory_slot *new,
1695 enum kvm_mr_change change)
1696 {
1697 int old_flags = old ? old->flags : 0;
1698 int new_flags = new ? new->flags : 0;
1699 /*
1700 * Update the total number of memslot pages before calling the arch
1701 * hook so that architectures can consume the result directly.
1702 */
1703 if (change == KVM_MR_DELETE)
1704 kvm->nr_memslot_pages -= old->npages;
1705 else if (change == KVM_MR_CREATE)
1706 kvm->nr_memslot_pages += new->npages;
1707
1708 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1709 int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1710 atomic_set(&kvm->nr_memslots_dirty_logging,
1711 atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1712 }
1713
1714 kvm_arch_commit_memory_region(kvm, old, new, change);
1715
1716 switch (change) {
1717 case KVM_MR_CREATE:
1718 /* Nothing more to do. */
1719 break;
1720 case KVM_MR_DELETE:
1721 /* Free the old memslot and all its metadata. */
1722 kvm_free_memslot(kvm, old);
1723 break;
1724 case KVM_MR_MOVE:
1725 case KVM_MR_FLAGS_ONLY:
1726 /*
1727 * Free the dirty bitmap as needed; the check below encompasses
1728 * both the flags and whether a ring buffer is being used.
1729 */
1730 if (old->dirty_bitmap && !new->dirty_bitmap)
1731 kvm_destroy_dirty_bitmap(old);
1732
1733 /*
1734 * The final quirk. Free the detached, old slot, but only its
1735 * memory, not any metadata. Metadata, including arch specific
1736 * data, may be reused by @new.
1737 */
1738 kfree(old);
1739 break;
1740 default:
1741 BUG();
1742 }
1743 }
1744
1745 /*
1746 * Activate @new, which must be installed in the inactive slots by the caller,
1747 * by swapping the active slots and then propagating @new to @old once @old is
1748 * unreachable and can be safely modified.
1749 *
1750 * With NULL @old this simply adds @new to @active (while swapping the sets).
1751 * With NULL @new this simply removes @old from @active and frees it
1752 * (while also swapping the sets).
1753 */
1754 static void kvm_activate_memslot(struct kvm *kvm,
1755 struct kvm_memory_slot *old,
1756 struct kvm_memory_slot *new)
1757 {
1758 int as_id = kvm_memslots_get_as_id(old, new);
1759
1760 kvm_swap_active_memslots(kvm, as_id);
1761
1762 /* Propagate the new memslot to the now inactive memslots. */
1763 kvm_replace_memslot(kvm, old, new);
1764 }
1765
1766 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1767 const struct kvm_memory_slot *src)
1768 {
1769 dest->base_gfn = src->base_gfn;
1770 dest->npages = src->npages;
1771 dest->dirty_bitmap = src->dirty_bitmap;
1772 dest->arch = src->arch;
1773 dest->userspace_addr = src->userspace_addr;
1774 dest->flags = src->flags;
1775 dest->id = src->id;
1776 dest->as_id = src->as_id;
1777 }
1778
1779 static void kvm_invalidate_memslot(struct kvm *kvm,
1780 struct kvm_memory_slot *old,
1781 struct kvm_memory_slot *invalid_slot)
1782 {
1783 /*
1784 * Mark the current slot INVALID. As with all memslot modifications,
1785 * this must be done on an unreachable slot to avoid modifying the
1786 * current slot in the active tree.
1787 */
1788 kvm_copy_memslot(invalid_slot, old);
1789 invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1790 kvm_replace_memslot(kvm, old, invalid_slot);
1791
1792 /*
1793 * Activate the slot that is now marked INVALID, but don't propagate
1794 * the slot to the now inactive slots. The slot is either going to be
1795 * deleted or recreated as a new slot.
1796 */
1797 kvm_swap_active_memslots(kvm, old->as_id);
1798
1799 /*
1800 * From this point no new shadow pages pointing to a deleted, or moved,
1801 * memslot will be created. Validation of sp->gfn happens in:
1802 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1803 * - kvm_is_visible_gfn (mmu_check_root)
1804 */
1805 kvm_arch_flush_shadow_memslot(kvm, old);
1806 kvm_arch_guest_memory_reclaimed(kvm);
1807
1808 /* Was released by kvm_swap_active_memslots(), reacquire. */
1809 mutex_lock(&kvm->slots_arch_lock);
1810
1811 /*
1812 * Copy the arch-specific field of the newly-installed slot back to the
1813 * old slot as the arch data could have changed between releasing
1814 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1815 * above. Writers are required to retrieve memslots *after* acquiring
1816 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1817 */
1818 old->arch = invalid_slot->arch;
1819 }
1820
1821 static void kvm_create_memslot(struct kvm *kvm,
1822 struct kvm_memory_slot *new)
1823 {
1824 /* Add the new memslot to the inactive set and activate. */
1825 kvm_replace_memslot(kvm, NULL, new);
1826 kvm_activate_memslot(kvm, NULL, new);
1827 }
1828
1829 static void kvm_delete_memslot(struct kvm *kvm,
1830 struct kvm_memory_slot *old,
1831 struct kvm_memory_slot *invalid_slot)
1832 {
1833 /*
1834 * Remove the old memslot from the inactive memslots by passing NULL as
1835 * the "new" slot, then do the same for the invalid version in the active slots.
1836 */
1837 kvm_replace_memslot(kvm, old, NULL);
1838 kvm_activate_memslot(kvm, invalid_slot, NULL);
1839 }
1840
1841 static void kvm_move_memslot(struct kvm *kvm,
1842 struct kvm_memory_slot *old,
1843 struct kvm_memory_slot *new,
1844 struct kvm_memory_slot *invalid_slot)
1845 {
1846 /*
1847 * Replace the old memslot in the inactive slots, and then swap slots
1848 * and replace the current INVALID with the new as well.
1849 */
1850 kvm_replace_memslot(kvm, old, new);
1851 kvm_activate_memslot(kvm, invalid_slot, new);
1852 }
1853
1854 static void kvm_update_flags_memslot(struct kvm *kvm,
1855 struct kvm_memory_slot *old,
1856 struct kvm_memory_slot *new)
1857 {
1858 /*
1859 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1860 * an intermediate step. Instead, the old memslot is simply replaced
1861 * with a new, updated copy in both memslot sets.
1862 */
1863 kvm_replace_memslot(kvm, old, new);
1864 kvm_activate_memslot(kvm, old, new);
1865 }
1866
1867 static int kvm_set_memslot(struct kvm *kvm,
1868 struct kvm_memory_slot *old,
1869 struct kvm_memory_slot *new,
1870 enum kvm_mr_change change)
1871 {
1872 struct kvm_memory_slot *invalid_slot;
1873 int r;
1874
1875 /*
1876 * Released in kvm_swap_active_memslots().
1877 *
1878 * Must be held from before the current memslots are copied until after
1879 * the new memslots are installed with rcu_assign_pointer, then
1880 * released before the synchronize srcu in kvm_swap_active_memslots().
1881 *
1882 * When modifying memslots outside of the slots_lock, must be held
1883 * before reading the pointer to the current memslots until after all
1884 * changes to those memslots are complete.
1885 *
1886 * These rules ensure that installing new memslots does not lose
1887 * changes made to the previous memslots.
1888 */
1889 mutex_lock(&kvm->slots_arch_lock);
1890
1891 /*
1892 * Invalidate the old slot if it's being deleted or moved. This is
1893 * done prior to actually deleting/moving the memslot to allow vCPUs to
1894 * continue running by ensuring there are no mappings or shadow pages
1895 * for the memslot when it is deleted/moved. Without pre-invalidation
1896 * (and without a lock), a window would exist between effecting the
1897 * delete/move and committing the changes in arch code where KVM or a
1898 * guest could access a non-existent memslot.
1899 *
1900 * Modifications are done on a temporary, unreachable slot. The old
1901 * slot needs to be preserved in case a later step fails and the
1902 * invalidation needs to be reverted.
1903 */
1904 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1905 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1906 if (!invalid_slot) {
1907 mutex_unlock(&kvm->slots_arch_lock);
1908 return -ENOMEM;
1909 }
1910 kvm_invalidate_memslot(kvm, old, invalid_slot);
1911 }
1912
1913 r = kvm_prepare_memory_region(kvm, old, new, change);
1914 if (r) {
1915 /*
1916 * For DELETE/MOVE, revert the above INVALID change. No
1917 * modifications required since the original slot was preserved
1918 * in the inactive slots. Changing the active memslots also
1919 * releases slots_arch_lock.
1920 */
1921 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1922 kvm_activate_memslot(kvm, invalid_slot, old);
1923 kfree(invalid_slot);
1924 } else {
1925 mutex_unlock(&kvm->slots_arch_lock);
1926 }
1927 return r;
1928 }
1929
1930 /*
1931 * For DELETE and MOVE, the temporary slot is now active as the INVALID
1932 * version of the old slot. MOVE is particularly special as it reuses
1933 * the old slot and returns a copy of the old slot (in @invalid_slot).
1934 * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the
1935 * old slot is detached but otherwise preserved.
1936 */
1937 if (change == KVM_MR_CREATE)
1938 kvm_create_memslot(kvm, new);
1939 else if (change == KVM_MR_DELETE)
1940 kvm_delete_memslot(kvm, old, invalid_slot);
1941 else if (change == KVM_MR_MOVE)
1942 kvm_move_memslot(kvm, old, new, invalid_slot);
1943 else if (change == KVM_MR_FLAGS_ONLY)
1944 kvm_update_flags_memslot(kvm, old, new);
1945 else
1946 BUG();
1947
1948 /* Free the temporary INVALID slot used for DELETE and MOVE. */
1949 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1950 kfree(invalid_slot);
1951
1952 /*
1953 * No need to refresh new->arch, changes after dropping slots_arch_lock
1954 * will directly hit the final, active memslot. Architectures are
1955 * responsible for knowing that new->arch may be stale.
1956 */
1957 kvm_commit_memory_region(kvm, old, new, change);
1958
1959 return 0;
1960 }
1961
1962 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1963 gfn_t start, gfn_t end)
1964 {
1965 struct kvm_memslot_iter iter;
1966
1967 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1968 if (iter.slot->id != id)
1969 return true;
1970 }
1971
1972 return false;
1973 }
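
/*
 * Illustrative sketch (not kernel code): the range semantics behind the
 * overlap walk above. Gfn ranges are half-open, [start, end), so two
 * ranges that merely touch (a.end == b.start) do not conflict.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_ranges_overlap(uint64_t a_start, uint64_t a_end,
				   uint64_t b_start, uint64_t b_end)
{
	return a_start < b_end && b_start < a_end;
}
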
1974
1975 /*
1976 * Allocate some memory and give it an address in the guest physical address
1977 * space.
1978 *
1979 * Discontiguous memory is allowed, mostly for framebuffers.
1980 *
1981 * Must be called holding kvm->slots_lock for write.
1982 */
1983 int __kvm_set_memory_region(struct kvm *kvm,
1984 const struct kvm_userspace_memory_region2 *mem)
1985 {
1986 struct kvm_memory_slot *old, *new;
1987 struct kvm_memslots *slots;
1988 enum kvm_mr_change change;
1989 unsigned long npages;
1990 gfn_t base_gfn;
1991 int as_id, id;
1992 int r;
1993
1994 r = check_memory_region_flags(kvm, mem);
1995 if (r)
1996 return r;
1997
1998 as_id = mem->slot >> 16;
1999 id = (u16)mem->slot;
2000
2001 /* General sanity checks */
2002 if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2003 (mem->memory_size != (unsigned long)mem->memory_size))
2004 return -EINVAL;
2005 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2006 return -EINVAL;
2007 /* We can read the guest memory with __xxx_user() later on. */
2008 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2009 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2010 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2011 mem->memory_size))
2012 return -EINVAL;
2013 if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2014 (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2015 mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2016 return -EINVAL;
2017 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2018 return -EINVAL;
2019 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2020 return -EINVAL;
2021 if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2022 return -EINVAL;
2023
2024 slots = __kvm_memslots(kvm, as_id);
2025
2026 /*
2027 * Note, the old memslot (and the pointer itself!) may be invalidated
2028 * and/or destroyed by kvm_set_memslot().
2029 */
2030 old = id_to_memslot(slots, id);
2031
2032 if (!mem->memory_size) {
2033 if (!old || !old->npages)
2034 return -EINVAL;
2035
2036 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2037 return -EIO;
2038
2039 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2040 }
2041
2042 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2043 npages = (mem->memory_size >> PAGE_SHIFT);
2044
2045 if (!old || !old->npages) {
2046 change = KVM_MR_CREATE;
2047
2048 /*
2049 * To simplify KVM internals, the total number of pages across
2050 * all memslots must fit in an unsigned long.
2051 */
2052 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2053 return -EINVAL;
2054 } else { /* Modify an existing slot. */
2055 /* Private memslots are immutable; they can only be deleted. */
2056 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2057 return -EINVAL;
2058 if ((mem->userspace_addr != old->userspace_addr) ||
2059 (npages != old->npages) ||
2060 ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2061 return -EINVAL;
2062
2063 if (base_gfn != old->base_gfn)
2064 change = KVM_MR_MOVE;
2065 else if (mem->flags != old->flags)
2066 change = KVM_MR_FLAGS_ONLY;
2067 else /* Nothing to change. */
2068 return 0;
2069 }
2070
2071 if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2072 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2073 return -EEXIST;
2074
2075 /* Allocate a slot that will persist in the memslots. */
2076 new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2077 if (!new)
2078 return -ENOMEM;
2079
2080 new->as_id = as_id;
2081 new->id = id;
2082 new->base_gfn = base_gfn;
2083 new->npages = npages;
2084 new->flags = mem->flags;
2085 new->userspace_addr = mem->userspace_addr;
2086 if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2087 r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2088 if (r)
2089 goto out;
2090 }
2091
2092 r = kvm_set_memslot(kvm, old, new, change);
2093 if (r)
2094 goto out_unbind;
2095
2096 return 0;
2097
2098 out_unbind:
2099 if (mem->flags & KVM_MEM_GUEST_MEMFD)
2100 kvm_gmem_unbind(new);
2101 out:
2102 kfree(new);
2103 return r;
2104 }
2105 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2106
2107 int kvm_set_memory_region(struct kvm *kvm,
2108 const struct kvm_userspace_memory_region2 *mem)
2109 {
2110 int r;
2111
2112 mutex_lock(&kvm->slots_lock);
2113 r = __kvm_set_memory_region(kvm, mem);
2114 mutex_unlock(&kvm->slots_lock);
2115 return r;
2116 }
2117 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
2118
2119 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2120 struct kvm_userspace_memory_region2 *mem)
2121 {
2122 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2123 return -EINVAL;
2124
2125 return kvm_set_memory_region(kvm, mem);
2126 }
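
/*
 * Illustrative userspace sketch (not kernel code): registering a memslot
 * with the legacy KVM_SET_USER_MEMORY_REGION ioctl, which the VM ioctl
 * path converts into the kvm_userspace_memory_region2 form consumed by
 * kvm_vm_ioctl_set_memory_region() above. The guest physical address and
 * 1 MiB size are arbitrary example values; error handling is minimal.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int example_register_memslot(int vm_fd)
{
	size_t size = 1024 * 1024;
	void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot = 0,			/* as_id in bits 31:16, slot id in bits 15:0 */
		.flags = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = 0x100000,	/* must be page aligned */
		.memory_size = size,		/* must be page aligned */
		.userspace_addr = (unsigned long)backing,
	};

	if (backing == MAP_FAILED)
		return -1;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
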
2127
2128 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2129 /**
2130 * kvm_get_dirty_log - get a snapshot of dirty pages
2131 * @kvm: pointer to kvm instance
2132 * @log: slot id and address to which we copy the log
2133 * @is_dirty: set to '1' if any dirty pages were found
2134 * @memslot: set to the associated memslot, always valid on success
2135 */
2136 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2137 int *is_dirty, struct kvm_memory_slot **memslot)
2138 {
2139 struct kvm_memslots *slots;
2140 int i, as_id, id;
2141 unsigned long n;
2142 unsigned long any = 0;
2143
2144 /* Dirty ring tracking may be exclusive to dirty log tracking */
2145 if (!kvm_use_dirty_bitmap(kvm))
2146 return -ENXIO;
2147
2148 *memslot = NULL;
2149 *is_dirty = 0;
2150
2151 as_id = log->slot >> 16;
2152 id = (u16)log->slot;
2153 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2154 return -EINVAL;
2155
2156 slots = __kvm_memslots(kvm, as_id);
2157 *memslot = id_to_memslot(slots, id);
2158 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2159 return -ENOENT;
2160
2161 kvm_arch_sync_dirty_log(kvm, *memslot);
2162
2163 n = kvm_dirty_bitmap_bytes(*memslot);
2164
2165 for (i = 0; !any && i < n/sizeof(long); ++i)
2166 any = (*memslot)->dirty_bitmap[i];
2167
2168 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2169 return -EFAULT;
2170
2171 if (any)
2172 *is_dirty = 1;
2173 return 0;
2174 }
2175 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
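
/*
 * Illustrative userspace sketch (not kernel code): fetching the dirty
 * bitmap for one slot with KVM_GET_DIRTY_LOG. The buffer holds one bit per
 * page, rounded up to 64-bit words, mirroring kvm_dirty_bitmap_bytes();
 * "npages" is an assumed input supplied by the caller.
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int example_fetch_dirty_log(int vm_fd, unsigned int slot,
				   unsigned long npages,
				   unsigned long **bitmap_out)
{
	unsigned long nbytes = ((npages + 63) / 64) * 8;
	unsigned long *bitmap = calloc(1, nbytes);
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};
	int ret;

	if (!bitmap)
		return -1;

	ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	if (ret) {
		free(bitmap);
		return ret;
	}

	*bitmap_out = bitmap;
	return 0;
}
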
2176
2177 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2178 /**
2179 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2180 * and reenable dirty page tracking for the corresponding pages.
2181 * @kvm: pointer to kvm instance
2182 * @log: slot id and address to which we copy the log
2183 *
2184 * Keep in mind that VCPU threads can write to the bitmap
2185 * concurrently. So, to avoid losing track of dirty pages, we keep the
2186 * following order:
2187 *
2188 * 1. Take a snapshot of the bit and clear it if needed.
2189 * 2. Write protect the corresponding page.
2190 * 3. Copy the snapshot to the userspace.
2191 * 4. Upon return caller flushes TLB's if needed.
2192 *
2193 * Between 2 and 4, the guest may write to the page using the remaining TLB
2194 * entry. This is not a problem because the page is reported dirty using
2195 * the snapshot taken before and step 4 ensures that writes done after
2196 * exiting to userspace will be logged for the next call.
2197 *
2198 */
2199 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2200 {
2201 struct kvm_memslots *slots;
2202 struct kvm_memory_slot *memslot;
2203 int i, as_id, id;
2204 unsigned long n;
2205 unsigned long *dirty_bitmap;
2206 unsigned long *dirty_bitmap_buffer;
2207 bool flush;
2208
2209 /* Dirty ring tracking may be exclusive to dirty log tracking */
2210 if (!kvm_use_dirty_bitmap(kvm))
2211 return -ENXIO;
2212
2213 as_id = log->slot >> 16;
2214 id = (u16)log->slot;
2215 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2216 return -EINVAL;
2217
2218 slots = __kvm_memslots(kvm, as_id);
2219 memslot = id_to_memslot(slots, id);
2220 if (!memslot || !memslot->dirty_bitmap)
2221 return -ENOENT;
2222
2223 dirty_bitmap = memslot->dirty_bitmap;
2224
2225 kvm_arch_sync_dirty_log(kvm, memslot);
2226
2227 n = kvm_dirty_bitmap_bytes(memslot);
2228 flush = false;
2229 if (kvm->manual_dirty_log_protect) {
2230 /*
2231 * Unlike kvm_get_dirty_log, we always return false in *flush,
2232 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
2233 * is some code duplication between this function and
2234 * kvm_get_dirty_log, but hopefully all architectures will
2235 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2236 * can be eliminated.
2237 */
2238 dirty_bitmap_buffer = dirty_bitmap;
2239 } else {
2240 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2241 memset(dirty_bitmap_buffer, 0, n);
2242
2243 KVM_MMU_LOCK(kvm);
2244 for (i = 0; i < n / sizeof(long); i++) {
2245 unsigned long mask;
2246 gfn_t offset;
2247
2248 if (!dirty_bitmap[i])
2249 continue;
2250
2251 flush = true;
2252 mask = xchg(&dirty_bitmap[i], 0);
2253 dirty_bitmap_buffer[i] = mask;
2254
2255 offset = i * BITS_PER_LONG;
2256 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2257 offset, mask);
2258 }
2259 KVM_MMU_UNLOCK(kvm);
2260 }
2261
2262 if (flush)
2263 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2264
2265 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2266 return -EFAULT;
2267 return 0;
2268 }
2269
2270
2271 /**
2272 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2273 * @kvm: kvm instance
2274 * @log: slot id and address to which we copy the log
2275 *
2276 * Steps 1-4 below provide a general overview of dirty page logging. See
2277 * kvm_get_dirty_log_protect() function description for additional details.
2278 *
2279 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2280 * always flush the TLB (step 4) even if a previous step failed and the dirty
2281 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging API
2282 * does not preclude a subsequent dirty log read by user space. Flushing the TLB
2283 * ensures writes will be marked dirty for the next log read.
2284 *
2285 * 1. Take a snapshot of the bit and clear it if needed.
2286 * 2. Write protect the corresponding page.
2287 * 3. Copy the snapshot to the userspace.
2288 * 4. Flush TLB's if needed.
2289 */
2290 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2291 struct kvm_dirty_log *log)
2292 {
2293 int r;
2294
2295 mutex_lock(&kvm->slots_lock);
2296
2297 r = kvm_get_dirty_log_protect(kvm, log);
2298
2299 mutex_unlock(&kvm->slots_lock);
2300 return r;
2301 }
2302
2303 /**
2304 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2305 * and reenable dirty page tracking for the corresponding pages.
2306 * @kvm: pointer to kvm instance
2307 * @log: slot id and address from which to fetch the bitmap of dirty pages
2308 */
2309 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2310 struct kvm_clear_dirty_log *log)
2311 {
2312 struct kvm_memslots *slots;
2313 struct kvm_memory_slot *memslot;
2314 int as_id, id;
2315 gfn_t offset;
2316 unsigned long i, n;
2317 unsigned long *dirty_bitmap;
2318 unsigned long *dirty_bitmap_buffer;
2319 bool flush;
2320
2321 /* Dirty ring tracking may be exclusive to dirty log tracking */
2322 if (!kvm_use_dirty_bitmap(kvm))
2323 return -ENXIO;
2324
2325 as_id = log->slot >> 16;
2326 id = (u16)log->slot;
2327 if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2328 return -EINVAL;
2329
2330 if (log->first_page & 63)
2331 return -EINVAL;
2332
2333 slots = __kvm_memslots(kvm, as_id);
2334 memslot = id_to_memslot(slots, id);
2335 if (!memslot || !memslot->dirty_bitmap)
2336 return -ENOENT;
2337
2338 dirty_bitmap = memslot->dirty_bitmap;
2339
2340 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2341
2342 if (log->first_page > memslot->npages ||
2343 log->num_pages > memslot->npages - log->first_page ||
2344 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2345 return -EINVAL;
2346
2347 kvm_arch_sync_dirty_log(kvm, memslot);
2348
2349 flush = false;
2350 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2351 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2352 return -EFAULT;
2353
2354 KVM_MMU_LOCK(kvm);
2355 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2356 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2357 i++, offset += BITS_PER_LONG) {
2358 unsigned long mask = *dirty_bitmap_buffer++;
2359 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2360 if (!mask)
2361 continue;
2362
2363 mask &= atomic_long_fetch_andnot(mask, p);
2364
2365 /*
2366 * mask contains the bits that really have been cleared. This
2367 * never includes any bits beyond the length of the memslot (if
2368 * the length is not aligned to 64 pages), therefore it is not
2369 * a problem if userspace sets them in log->dirty_bitmap.
2370 */
2371 if (mask) {
2372 flush = true;
2373 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2374 offset, mask);
2375 }
2376 }
2377 KVM_MMU_UNLOCK(kvm);
2378
2379 if (flush)
2380 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2381
2382 return 0;
2383 }
2384
2385 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2386 struct kvm_clear_dirty_log *log)
2387 {
2388 int r;
2389
2390 mutex_lock(&kvm->slots_lock);
2391
2392 r = kvm_clear_dirty_log_protect(kvm, log);
2393
2394 mutex_unlock(&kvm->slots_lock);
2395 return r;
2396 }
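
/*
 * Illustrative userspace sketch (not kernel code): handing a bitmap back
 * with KVM_CLEAR_DIRTY_LOG so write protection is re-armed only for pages
 * userspace has finished processing. As the checks above require,
 * first_page must be 64-aligned and num_pages a multiple of 64 unless the
 * range runs to the end of the slot; the manual-protect capability is
 * assumed to have been enabled on the VM.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_clear_dirty_range(int vm_fd, unsigned int slot,
				     unsigned long *bitmap,
				     unsigned long first_page,
				     unsigned int num_pages)
{
	struct kvm_clear_dirty_log clear = {
		.slot = slot,
		.num_pages = num_pages,
		.first_page = first_page,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}
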
2397 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2398
2399 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2400 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2401 {
2402 if (!kvm || kvm_arch_has_private_mem(kvm))
2403 return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2404
2405 return 0;
2406 }
2407
2408 /*
2409 * Returns true if _all_ gfns in the range [@start, @end) have attributes
2410 * such that the bits in @mask match @attrs.
2411 */
2412 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2413 unsigned long mask, unsigned long attrs)
2414 {
2415 XA_STATE(xas, &kvm->mem_attr_array, start);
2416 unsigned long index;
2417 void *entry;
2418
2419 mask &= kvm_supported_mem_attributes(kvm);
2420 if (attrs & ~mask)
2421 return false;
2422
2423 if (end == start + 1)
2424 return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
2425
2426 guard(rcu)();
2427 if (!attrs)
2428 return !xas_find(&xas, end - 1);
2429
2430 for (index = start; index < end; index++) {
2431 do {
2432 entry = xas_next(&xas);
2433 } while (xas_retry(&xas, entry));
2434
2435 if (xas.xa_index != index ||
2436 (xa_to_value(entry) & mask) != attrs)
2437 return false;
2438 }
2439
2440 return true;
2441 }
2442
2443 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2444 struct kvm_mmu_notifier_range *range)
2445 {
2446 struct kvm_gfn_range gfn_range;
2447 struct kvm_memory_slot *slot;
2448 struct kvm_memslots *slots;
2449 struct kvm_memslot_iter iter;
2450 bool found_memslot = false;
2451 bool ret = false;
2452 int i;
2453
2454 gfn_range.arg = range->arg;
2455 gfn_range.may_block = range->may_block;
2456
2457 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2458 slots = __kvm_memslots(kvm, i);
2459
2460 kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2461 slot = iter.slot;
2462 gfn_range.slot = slot;
2463
2464 gfn_range.start = max(range->start, slot->base_gfn);
2465 gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2466 if (gfn_range.start >= gfn_range.end)
2467 continue;
2468
2469 if (!found_memslot) {
2470 found_memslot = true;
2471 KVM_MMU_LOCK(kvm);
2472 if (!IS_KVM_NULL_FN(range->on_lock))
2473 range->on_lock(kvm);
2474 }
2475
2476 ret |= range->handler(kvm, &gfn_range);
2477 }
2478 }
2479
2480 if (range->flush_on_ret && ret)
2481 kvm_flush_remote_tlbs(kvm);
2482
2483 if (found_memslot)
2484 KVM_MMU_UNLOCK(kvm);
2485 }
2486
2487 static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2488 struct kvm_gfn_range *range)
2489 {
2490 /*
2491 * Unconditionally add the range to the invalidation set, regardless of
2492 * whether or not the arch callback actually needs to zap SPTEs. E.g.
2493 * if KVM supports RWX attributes in the future and the attributes are
2494 * going from R=>RW, zapping isn't strictly necessary. Unconditionally
2495 * adding the range allows KVM to require that MMU invalidations add at
2496 * least one range between begin() and end(), e.g. allows KVM to detect
2497 * bugs where the add() is missed. Relaxing the rule *might* be safe,
2498 * but it's not obvious that allowing new mappings while the attributes
2499 * are in flux is desirable or worth the complexity.
2500 */
2501 kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2502
2503 return kvm_arch_pre_set_memory_attributes(kvm, range);
2504 }
2505
2506 /* Set @attributes for the gfn range [@start, @end). */
2507 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2508 unsigned long attributes)
2509 {
2510 struct kvm_mmu_notifier_range pre_set_range = {
2511 .start = start,
2512 .end = end,
2513 .handler = kvm_pre_set_memory_attributes,
2514 .on_lock = kvm_mmu_invalidate_begin,
2515 .flush_on_ret = true,
2516 .may_block = true,
2517 };
2518 struct kvm_mmu_notifier_range post_set_range = {
2519 .start = start,
2520 .end = end,
2521 .arg.attributes = attributes,
2522 .handler = kvm_arch_post_set_memory_attributes,
2523 .on_lock = kvm_mmu_invalidate_end,
2524 .may_block = true,
2525 };
2526 unsigned long i;
2527 void *entry;
2528 int r = 0;
2529
2530 entry = attributes ? xa_mk_value(attributes) : NULL;
2531
2532 mutex_lock(&kvm->slots_lock);
2533
2534 /* Nothing to do if the entire range already has the desired attributes. */
2535 if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
2536 goto out_unlock;
2537
2538 /*
2539 * Reserve memory ahead of time to avoid having to deal with failures
2540 * partway through setting the new attributes.
2541 */
2542 for (i = start; i < end; i++) {
2543 r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2544 if (r)
2545 goto out_unlock;
2546 }
2547
2548 kvm_handle_gfn_range(kvm, &pre_set_range);
2549
2550 for (i = start; i < end; i++) {
2551 r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2552 GFP_KERNEL_ACCOUNT));
2553 KVM_BUG_ON(r, kvm);
2554 }
2555
2556 kvm_handle_gfn_range(kvm, &post_set_range);
2557
2558 out_unlock:
2559 mutex_unlock(&kvm->slots_lock);
2560
2561 return r;
2562 }
2563 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2564 struct kvm_memory_attributes *attrs)
2565 {
2566 gfn_t start, end;
2567
2568 /* flags is currently not used. */
2569 if (attrs->flags)
2570 return -EINVAL;
2571 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2572 return -EINVAL;
2573 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2574 return -EINVAL;
2575 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2576 return -EINVAL;
2577
2578 start = attrs->address >> PAGE_SHIFT;
2579 end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2580
2581 /*
2582 * xarray tracks data using "unsigned long", and as a result so does
2583 * KVM. For simplicity, generic attributes are supported only on 64-bit
2584 * architectures.
2585 */
2586 BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2587
2588 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2589 }
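
/*
 * Illustrative userspace sketch (not kernel code): marking a guest range
 * private with KVM_SET_MEMORY_ATTRIBUTES, the ioctl that lands in
 * kvm_vm_ioctl_set_mem_attributes() above. Address and size must be page
 * aligned and flags must be zero, matching the checks in that function.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_set_range_private(int vm_fd, __u64 gpa, __u64 size)
{
	struct kvm_memory_attributes attrs = {
		.address = gpa,
		.size = size,
		.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
		.flags = 0,
	};

	return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
}
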
2590 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2591
2592 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2593 {
2594 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2595 }
2596 EXPORT_SYMBOL_GPL(gfn_to_memslot);
2597
2598 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2599 {
2600 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2601 u64 gen = slots->generation;
2602 struct kvm_memory_slot *slot;
2603
2604 /*
2605 * This also protects against using a memslot from a different address space,
2606 * since different address spaces have different generation numbers.
2607 */
2608 if (unlikely(gen != vcpu->last_used_slot_gen)) {
2609 vcpu->last_used_slot = NULL;
2610 vcpu->last_used_slot_gen = gen;
2611 }
2612
2613 slot = try_get_memslot(vcpu->last_used_slot, gfn);
2614 if (slot)
2615 return slot;
2616
2617 /*
2618 * Fall back to searching all memslots. We purposely use
2619 * search_memslots() instead of __gfn_to_memslot() to avoid
2620 * thrashing the VM-wide last_used_slot in kvm_memslots.
2621 */
2622 slot = search_memslots(slots, gfn, false);
2623 if (slot) {
2624 vcpu->last_used_slot = slot;
2625 return slot;
2626 }
2627
2628 return NULL;
2629 }
2630
2631 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2632 {
2633 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2634
2635 return kvm_is_visible_memslot(memslot);
2636 }
2637 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2638
2639 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2640 {
2641 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2642
2643 return kvm_is_visible_memslot(memslot);
2644 }
2645 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2646
2647 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2648 {
2649 struct vm_area_struct *vma;
2650 unsigned long addr, size;
2651
2652 size = PAGE_SIZE;
2653
2654 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2655 if (kvm_is_error_hva(addr))
2656 return PAGE_SIZE;
2657
2658 mmap_read_lock(current->mm);
2659 vma = find_vma(current->mm, addr);
2660 if (!vma)
2661 goto out;
2662
2663 size = vma_kernel_pagesize(vma);
2664
2665 out:
2666 mmap_read_unlock(current->mm);
2667
2668 return size;
2669 }
2670
2671 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2672 {
2673 return slot->flags & KVM_MEM_READONLY;
2674 }
2675
2676 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2677 gfn_t *nr_pages, bool write)
2678 {
2679 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2680 return KVM_HVA_ERR_BAD;
2681
2682 if (memslot_is_readonly(slot) && write)
2683 return KVM_HVA_ERR_RO_BAD;
2684
2685 if (nr_pages)
2686 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2687
2688 return __gfn_to_hva_memslot(slot, gfn);
2689 }
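
/*
 * Illustrative sketch (not kernel code) of the translation performed by
 * __gfn_to_hva_memslot(): the hva is the slot's userspace mapping plus the
 * page offset of the gfn within the slot. The 4 KiB page size is an
 * assumption for the example.
 */
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096UL

struct example_slot {
	uint64_t base_gfn;
	uint64_t npages;
	uint64_t userspace_addr;
};

static uint64_t example_gfn_to_hva(const struct example_slot *slot, uint64_t gfn)
{
	/* Caller has already checked base_gfn <= gfn < base_gfn + npages. */
	return slot->userspace_addr + (gfn - slot->base_gfn) * EXAMPLE_PAGE_SIZE;
}
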
2690
2691 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2692 gfn_t *nr_pages)
2693 {
2694 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2695 }
2696
2697 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2698 gfn_t gfn)
2699 {
2700 return gfn_to_hva_many(slot, gfn, NULL);
2701 }
2702 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2703
2704 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2705 {
2706 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2707 }
2708 EXPORT_SYMBOL_GPL(gfn_to_hva);
2709
2710 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2711 {
2712 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2713 }
2714 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2715
2716 /*
2717 * Return the hva of a @gfn and the R/W attribute if possible.
2718 *
2719 * @slot: the kvm_memory_slot which contains @gfn
2720 * @gfn: the gfn to be translated
2721 * @writable: used to return the read/write attribute of the @slot if the hva
2722 * is valid and @writable is not NULL
2723 */
2724 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2725 gfn_t gfn, bool *writable)
2726 {
2727 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2728
2729 if (!kvm_is_error_hva(hva) && writable)
2730 *writable = !memslot_is_readonly(slot);
2731
2732 return hva;
2733 }
2734
2735 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2736 {
2737 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2738
2739 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2740 }
2741
2742 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2743 {
2744 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2745
2746 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2747 }
2748
2749 static inline int check_user_page_hwpoison(unsigned long addr)
2750 {
2751 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2752
2753 rc = get_user_pages(addr, 1, flags, NULL);
2754 return rc == -EHWPOISON;
2755 }
2756
2757 /*
2758 * The fast path to get the writable pfn which will be stored in @pfn;
2759 * true indicates success, otherwise false is returned. It's also the
2760 * only path that can run in atomic context.
2761 */
2762 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2763 bool *writable, kvm_pfn_t *pfn)
2764 {
2765 struct page *page[1];
2766
2767 /*
2768 * Fast pin a writable pfn only if it is a write fault request
2769 * or the caller allows mapping a writable pfn for a read fault
2770 * request.
2771 */
2772 if (!(write_fault || writable))
2773 return false;
2774
2775 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2776 *pfn = page_to_pfn(page[0]);
2777
2778 if (writable)
2779 *writable = true;
2780 return true;
2781 }
2782
2783 return false;
2784 }
2785
2786 /*
2787 * The slow path to get the pfn of the specified host virtual address;
2788 * 1 indicates success, -errno is returned if an error is detected.
2789 */
2790 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2791 bool interruptible, bool *writable, kvm_pfn_t *pfn)
2792 {
2793 /*
2794 * When a VCPU accesses a page that is not mapped into the secondary
2795 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2796 * make progress. We always want to honor NUMA hinting faults in that
2797 * case, because GUP usage corresponds to memory accesses from the VCPU.
2798 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2799 * mapped into the secondary MMU and gets accessed by a VCPU.
2800 *
2801 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2802 * implicitly honor NUMA hinting faults and don't need this flag.
2803 */
2804 unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2805 struct page *page;
2806 int npages;
2807
2808 might_sleep();
2809
2810 if (writable)
2811 *writable = write_fault;
2812
2813 if (write_fault)
2814 flags |= FOLL_WRITE;
2815 if (async)
2816 flags |= FOLL_NOWAIT;
2817 if (interruptible)
2818 flags |= FOLL_INTERRUPTIBLE;
2819
2820 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2821 if (npages != 1)
2822 return npages;
2823
2824 /* map read fault as writable if possible */
2825 if (unlikely(!write_fault) && writable) {
2826 struct page *wpage;
2827
2828 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2829 *writable = true;
2830 put_page(page);
2831 page = wpage;
2832 }
2833 }
2834 *pfn = page_to_pfn(page);
2835 return npages;
2836 }
2837
2838 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2839 {
2840 if (unlikely(!(vma->vm_flags & VM_READ)))
2841 return false;
2842
2843 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2844 return false;
2845
2846 return true;
2847 }
2848
2849 static int kvm_try_get_pfn(kvm_pfn_t pfn)
2850 {
2851 struct page *page = kvm_pfn_to_refcounted_page(pfn);
2852
2853 if (!page)
2854 return 1;
2855
2856 return get_page_unless_zero(page);
2857 }
2858
2859 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2860 unsigned long addr, bool write_fault,
2861 bool *writable, kvm_pfn_t *p_pfn)
2862 {
2863 struct follow_pfnmap_args args = { .vma = vma, .address = addr };
2864 kvm_pfn_t pfn;
2865 int r;
2866
2867 r = follow_pfnmap_start(&args);
2868 if (r) {
2869 /*
2870 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2871 * not call the fault handler, so do it here.
2872 */
2873 bool unlocked = false;
2874 r = fixup_user_fault(current->mm, addr,
2875 (write_fault ? FAULT_FLAG_WRITE : 0),
2876 &unlocked);
2877 if (unlocked)
2878 return -EAGAIN;
2879 if (r)
2880 return r;
2881
2882 r = follow_pfnmap_start(&args);
2883 if (r)
2884 return r;
2885 }
2886
2887 if (write_fault && !args.writable) {
2888 pfn = KVM_PFN_ERR_RO_FAULT;
2889 goto out;
2890 }
2891
2892 if (writable)
2893 *writable = args.writable;
2894 pfn = args.pfn;
2895
2896 /*
2897 * Get a reference here because callers of *hva_to_pfn* and
2898 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2899 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
2900 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2901 * simply do nothing for reserved pfns.
2902 *
2903 * Whoever called remap_pfn_range is also going to call e.g.
2904 * unmap_mapping_range before the underlying pages are freed,
2905 * causing a call to our MMU notifier.
2906 *
2907 * Certain IO or PFNMAP mappings can be backed with valid
2908 * struct pages, but be allocated without refcounting e.g.,
2909 * tail pages of non-compound higher order allocations, which
2910 * would then underflow the refcount when the caller does the
2911 * required put_page. Don't allow those pages here.
2912 */
2913 if (!kvm_try_get_pfn(pfn))
2914 r = -EFAULT;
2915 out:
2916 follow_pfnmap_end(&args);
2917 *p_pfn = pfn;
2918
2919 return r;
2920 }
2921
2922 /*
2923 * Pin guest page in memory and return its pfn.
2924 * @addr: host virtual address which maps memory to the guest
2925 * @atomic: whether this function is forbidden from sleeping
2926 * @interruptible: whether the process can be interrupted by non-fatal signals
2927 * @async: whether this function needs to wait for IO to complete if the
2928 * host page is not in memory
2929 * @write_fault: whether we should get a writable host page
2930 * @writable: whether mapping a writable host page is allowed for !@write_fault
2931 *
2932 * The function will map a writable host page for these two cases:
2933 * 1): @write_fault = true
2934 * 2): @write_fault = false && @writable; @writable will tell the caller
2935 * whether the mapping is writable.
2936 */
2937 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2938 bool *async, bool write_fault, bool *writable)
2939 {
2940 struct vm_area_struct *vma;
2941 kvm_pfn_t pfn;
2942 int npages, r;
2943
2944 /* we can do it either atomically or asynchronously, not both */
2945 BUG_ON(atomic && async);
2946
2947 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2948 return pfn;
2949
2950 if (atomic)
2951 return KVM_PFN_ERR_FAULT;
2952
2953 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2954 writable, &pfn);
2955 if (npages == 1)
2956 return pfn;
2957 if (npages == -EINTR)
2958 return KVM_PFN_ERR_SIGPENDING;
2959
2960 mmap_read_lock(current->mm);
2961 if (npages == -EHWPOISON ||
2962 (!async && check_user_page_hwpoison(addr))) {
2963 pfn = KVM_PFN_ERR_HWPOISON;
2964 goto exit;
2965 }
2966
2967 retry:
2968 vma = vma_lookup(current->mm, addr);
2969
2970 if (vma == NULL)
2971 pfn = KVM_PFN_ERR_FAULT;
2972 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2973 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2974 if (r == -EAGAIN)
2975 goto retry;
2976 if (r < 0)
2977 pfn = KVM_PFN_ERR_FAULT;
2978 } else {
2979 if (async && vma_is_valid(vma, write_fault))
2980 *async = true;
2981 pfn = KVM_PFN_ERR_FAULT;
2982 }
2983 exit:
2984 mmap_read_unlock(current->mm);
2985 return pfn;
2986 }
2987
2988 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2989 bool atomic, bool interruptible, bool *async,
2990 bool write_fault, bool *writable, hva_t *hva)
2991 {
2992 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2993
2994 if (hva)
2995 *hva = addr;
2996
2997 if (kvm_is_error_hva(addr)) {
2998 if (writable)
2999 *writable = false;
3000
3001 return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
3002 KVM_PFN_NOSLOT;
3003 }
3004
3005 /* Do not map writable pfn in the readonly memslot. */
3006 if (writable && memslot_is_readonly(slot)) {
3007 *writable = false;
3008 writable = NULL;
3009 }
3010
3011 return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
3012 writable);
3013 }
3014 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
3015
3016 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
3017 bool *writable)
3018 {
3019 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
3020 NULL, write_fault, writable, NULL);
3021 }
3022 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
3023
3024 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
3025 {
3026 return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
3027 NULL, NULL);
3028 }
3029 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
3030
3031 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
3032 {
3033 return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
3034 NULL, NULL);
3035 }
3036 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
3037
3038 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
3039 {
3040 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
3041 }
3042 EXPORT_SYMBOL_GPL(gfn_to_pfn);
3043
3044 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3045 struct page **pages, int nr_pages)
3046 {
3047 unsigned long addr;
3048 gfn_t entry = 0;
3049
3050 addr = gfn_to_hva_many(slot, gfn, &entry);
3051 if (kvm_is_error_hva(addr))
3052 return -1;
3053
3054 if (entry < nr_pages)
3055 return 0;
3056
3057 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3058 }
3059 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
3060
3061 /*
3062 * Do not use this helper unless you are absolutely certain the gfn _must_ be
3063 * backed by 'struct page'. A valid example is if the backing memslot is
3064 * controlled by KVM. Note, if the returned page is valid, its refcount has
3065 * been elevated by gfn_to_pfn().
3066 */
3067 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
3068 {
3069 struct page *page;
3070 kvm_pfn_t pfn;
3071
3072 pfn = gfn_to_pfn(kvm, gfn);
3073
3074 if (is_error_noslot_pfn(pfn))
3075 return KVM_ERR_PTR_BAD_PAGE;
3076
3077 page = kvm_pfn_to_refcounted_page(pfn);
3078 if (!page)
3079 return KVM_ERR_PTR_BAD_PAGE;
3080
3081 return page;
3082 }
3083 EXPORT_SYMBOL_GPL(gfn_to_page);
3084
3085 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
3086 {
3087 if (dirty)
3088 kvm_release_pfn_dirty(pfn);
3089 else
3090 kvm_release_pfn_clean(pfn);
3091 }
3092
3093 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
3094 {
3095 kvm_pfn_t pfn;
3096 void *hva = NULL;
3097 struct page *page = KVM_UNMAPPED_PAGE;
3098
3099 if (!map)
3100 return -EINVAL;
3101
3102 pfn = gfn_to_pfn(vcpu->kvm, gfn);
3103 if (is_error_noslot_pfn(pfn))
3104 return -EINVAL;
3105
3106 if (pfn_valid(pfn)) {
3107 page = pfn_to_page(pfn);
3108 hva = kmap(page);
3109 #ifdef CONFIG_HAS_IOMEM
3110 } else {
3111 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
3112 #endif
3113 }
3114
3115 if (!hva)
3116 return -EFAULT;
3117
3118 map->page = page;
3119 map->hva = hva;
3120 map->pfn = pfn;
3121 map->gfn = gfn;
3122
3123 return 0;
3124 }
3125 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
3126
3127 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
3128 {
3129 if (!map)
3130 return;
3131
3132 if (!map->hva)
3133 return;
3134
3135 if (map->page != KVM_UNMAPPED_PAGE)
3136 kunmap(map->page);
3137 #ifdef CONFIG_HAS_IOMEM
3138 else
3139 memunmap(map->hva);
3140 #endif
3141
3142 if (dirty)
3143 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3144
3145 kvm_release_pfn(map->pfn, dirty);
3146
3147 map->hva = NULL;
3148 map->page = NULL;
3149 }
3150 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
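
/*
 * Illustrative sketch (not part of this file): how a kernel-side caller
 * typically pairs kvm_vcpu_map()/kvm_vcpu_unmap() to touch a single guest
 * page. The gfn and the decision to mark the page dirty are example
 * inputs; zeroing the page is purely for demonstration.
 */
static int example_poke_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_host_map map;
	int r = kvm_vcpu_map(vcpu, gfn, &map);

	if (r)
		return r;

	/* map.hva is now a kernel mapping of the guest page. */
	memset(map.hva, 0, PAGE_SIZE);

	/* Pass dirty=true so the write shows up in dirty logging. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}
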
3151
3152 static bool kvm_is_ad_tracked_page(struct page *page)
3153 {
3154 /*
3155 * Per page-flags.h, pages tagged PG_reserved "should in general not be
3156 * touched (e.g. set dirty) except by its owner".
3157 */
3158 return !PageReserved(page);
3159 }
3160
3161 static void kvm_set_page_dirty(struct page *page)
3162 {
3163 if (kvm_is_ad_tracked_page(page))
3164 SetPageDirty(page);
3165 }
3166
3167 static void kvm_set_page_accessed(struct page *page)
3168 {
3169 if (kvm_is_ad_tracked_page(page))
3170 mark_page_accessed(page);
3171 }
3172
3173 void kvm_release_page_clean(struct page *page)
3174 {
3175 WARN_ON(is_error_page(page));
3176
3177 kvm_set_page_accessed(page);
3178 put_page(page);
3179 }
3180 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
3181
3182 void kvm_release_pfn_clean(kvm_pfn_t pfn)
3183 {
3184 struct page *page;
3185
3186 if (is_error_noslot_pfn(pfn))
3187 return;
3188
3189 page = kvm_pfn_to_refcounted_page(pfn);
3190 if (!page)
3191 return;
3192
3193 kvm_release_page_clean(page);
3194 }
3195 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
3196
3197 void kvm_release_page_dirty(struct page *page)
3198 {
3199 WARN_ON(is_error_page(page));
3200
3201 kvm_set_page_dirty(page);
3202 kvm_release_page_clean(page);
3203 }
3204 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
3205
3206 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
3207 {
3208 struct page *page;
3209
3210 if (is_error_noslot_pfn(pfn))
3211 return;
3212
3213 page = kvm_pfn_to_refcounted_page(pfn);
3214 if (!page)
3215 return;
3216
3217 kvm_release_page_dirty(page);
3218 }
3219 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
3220
3221 /*
3222 * Note, checking for an error/noslot pfn is the caller's responsibility when
3223 * directly marking a page dirty/accessed. Unlike the "release" helpers, the
3224 * "set" helpers are not to be used when the pfn might point at garbage.
3225 */
3226 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
3227 {
3228 if (WARN_ON(is_error_noslot_pfn(pfn)))
3229 return;
3230
3231 if (pfn_valid(pfn))
3232 kvm_set_page_dirty(pfn_to_page(pfn));
3233 }
3234 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
3235
3236 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
3237 {
3238 if (WARN_ON(is_error_noslot_pfn(pfn)))
3239 return;
3240
3241 if (pfn_valid(pfn))
3242 kvm_set_page_accessed(pfn_to_page(pfn));
3243 }
3244 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
3245
3246 static int next_segment(unsigned long len, int offset)
3247 {
3248 if (len > PAGE_SIZE - offset)
3249 return PAGE_SIZE - offset;
3250 else
3251 return len;
3252 }
3253
3254 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3255 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3256 void *data, int offset, int len)
3257 {
3258 int r;
3259 unsigned long addr;
3260
3261 if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3262 return -EFAULT;
3263
3264 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3265 if (kvm_is_error_hva(addr))
3266 return -EFAULT;
3267 r = __copy_from_user(data, (void __user *)addr + offset, len);
3268 if (r)
3269 return -EFAULT;
3270 return 0;
3271 }
3272
3273 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3274 int len)
3275 {
3276 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3277
3278 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3279 }
3280 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
3281
3282 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3283 int offset, int len)
3284 {
3285 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3286
3287 return __kvm_read_guest_page(slot, gfn, data, offset, len);
3288 }
3289 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
3290
3291 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3292 {
3293 gfn_t gfn = gpa >> PAGE_SHIFT;
3294 int seg;
3295 int offset = offset_in_page(gpa);
3296 int ret;
3297
3298 while ((seg = next_segment(len, offset)) != 0) {
3299 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3300 if (ret < 0)
3301 return ret;
3302 offset = 0;
3303 len -= seg;
3304 data += seg;
3305 ++gfn;
3306 }
3307 return 0;
3308 }
3309 EXPORT_SYMBOL_GPL(kvm_read_guest);
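
/*
 * Illustrative sketch (not kernel code): the page-chunking pattern used by
 * kvm_read_guest() above. A gpa-based copy is split so no chunk crosses a
 * page boundary; the per-page copy is a stub standing in for
 * __kvm_read_guest_page(), and the 4 KiB page size is an assumption.
 */
#include <stdint.h>
#include <string.h>

#define EXAMPLE_PAGE_SIZE 4096UL

static int example_read_page(uint64_t gfn, void *data, int offset, int len)
{
	/* Stand-in: the real helper copies from the gfn's hva. */
	(void)gfn;
	(void)offset;
	memset(data, 0, len);
	return 0;
}

static int example_read_guest(uint64_t gpa, void *data, unsigned long len)
{
	uint64_t gfn = gpa / EXAMPLE_PAGE_SIZE;
	int offset = gpa % EXAMPLE_PAGE_SIZE;

	while (len) {
		int seg = len > EXAMPLE_PAGE_SIZE - offset ?
			  (int)(EXAMPLE_PAGE_SIZE - offset) : (int)len;
		int ret = example_read_page(gfn, data, offset, seg);

		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data = (char *)data + seg;
		gfn++;
	}
	return 0;
}
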
3310
3311 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3312 {
3313 gfn_t gfn = gpa >> PAGE_SHIFT;
3314 int seg;
3315 int offset = offset_in_page(gpa);
3316 int ret;
3317
3318 while ((seg = next_segment(len, offset)) != 0) {
3319 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3320 if (ret < 0)
3321 return ret;
3322 offset = 0;
3323 len -= seg;
3324 data += seg;
3325 ++gfn;
3326 }
3327 return 0;
3328 }
3329 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3330
3331 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3332 void *data, int offset, unsigned long len)
3333 {
3334 int r;
3335 unsigned long addr;
3336
3337 if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3338 return -EFAULT;
3339
3340 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3341 if (kvm_is_error_hva(addr))
3342 return -EFAULT;
3343 pagefault_disable();
3344 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3345 pagefault_enable();
3346 if (r)
3347 return -EFAULT;
3348 return 0;
3349 }
3350
3351 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3352 void *data, unsigned long len)
3353 {
3354 gfn_t gfn = gpa >> PAGE_SHIFT;
3355 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3356 int offset = offset_in_page(gpa);
3357
3358 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3359 }
3360 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3361
3362 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3363 static int __kvm_write_guest_page(struct kvm *kvm,
3364 struct kvm_memory_slot *memslot, gfn_t gfn,
3365 const void *data, int offset, int len)
3366 {
3367 int r;
3368 unsigned long addr;
3369
3370 if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3371 return -EFAULT;
3372
3373 addr = gfn_to_hva_memslot(memslot, gfn);
3374 if (kvm_is_error_hva(addr))
3375 return -EFAULT;
3376 r = __copy_to_user((void __user *)addr + offset, data, len);
3377 if (r)
3378 return -EFAULT;
3379 mark_page_dirty_in_slot(kvm, memslot, gfn);
3380 return 0;
3381 }
3382
3383 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3384 const void *data, int offset, int len)
3385 {
3386 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3387
3388 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3389 }
3390 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3391
3392 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3393 const void *data, int offset, int len)
3394 {
3395 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3396
3397 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3398 }
3399 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3400
3401 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3402 unsigned long len)
3403 {
3404 gfn_t gfn = gpa >> PAGE_SHIFT;
3405 int seg;
3406 int offset = offset_in_page(gpa);
3407 int ret;
3408
3409 while ((seg = next_segment(len, offset)) != 0) {
3410 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3411 if (ret < 0)
3412 return ret;
3413 offset = 0;
3414 len -= seg;
3415 data += seg;
3416 ++gfn;
3417 }
3418 return 0;
3419 }
3420 EXPORT_SYMBOL_GPL(kvm_write_guest);
3421
3422 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3423 unsigned long len)
3424 {
3425 gfn_t gfn = gpa >> PAGE_SHIFT;
3426 int seg;
3427 int offset = offset_in_page(gpa);
3428 int ret;
3429
3430 while ((seg = next_segment(len, offset)) != 0) {
3431 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3432 if (ret < 0)
3433 return ret;
3434 offset = 0;
3435 len -= seg;
3436 data += seg;
3437 ++gfn;
3438 }
3439 return 0;
3440 }
3441 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3442
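/*
 * gfn_to_hva_cache: cache the memslot and host virtual address for a guest
 * physical range so repeated accesses can skip the memslot lookup. A rough
 * usage sketch (not lifted from any particular caller):
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 *
 * The cached helpers revalidate against slots->generation and fall back to
 * the uncached kvm_read_guest()/kvm_write_guest() path when the region
 * spans a page boundary (ghc->memslot is left NULL in that case).
 */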
3443 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3444 struct gfn_to_hva_cache *ghc,
3445 gpa_t gpa, unsigned long len)
3446 {
3447 int offset = offset_in_page(gpa);
3448 gfn_t start_gfn = gpa >> PAGE_SHIFT;
3449 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3450 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3451 gfn_t nr_pages_avail;
3452
3453 /* Update ghc->generation before performing any error checks. */
3454 ghc->generation = slots->generation;
3455
3456 if (start_gfn > end_gfn) {
3457 ghc->hva = KVM_HVA_ERR_BAD;
3458 return -EINVAL;
3459 }
3460
3461 /*
3462 * If the requested region crosses two memslots, we still
3463 * verify that the entire region is valid here.
3464 */
3465 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3466 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3467 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3468 &nr_pages_avail);
3469 if (kvm_is_error_hva(ghc->hva))
3470 return -EFAULT;
3471 }
3472
3473 /* Use the slow path for cross page reads and writes. */
3474 if (nr_pages_needed == 1)
3475 ghc->hva += offset;
3476 else
3477 ghc->memslot = NULL;
3478
3479 ghc->gpa = gpa;
3480 ghc->len = len;
3481 return 0;
3482 }
3483
3484 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3485 gpa_t gpa, unsigned long len)
3486 {
3487 struct kvm_memslots *slots = kvm_memslots(kvm);
3488 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3489 }
3490 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3491
3492 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3493 void *data, unsigned int offset,
3494 unsigned long len)
3495 {
3496 struct kvm_memslots *slots = kvm_memslots(kvm);
3497 int r;
3498 gpa_t gpa = ghc->gpa + offset;
3499
3500 if (WARN_ON_ONCE(len + offset > ghc->len))
3501 return -EINVAL;
3502
3503 if (slots->generation != ghc->generation) {
3504 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3505 return -EFAULT;
3506 }
3507
3508 if (kvm_is_error_hva(ghc->hva))
3509 return -EFAULT;
3510
3511 if (unlikely(!ghc->memslot))
3512 return kvm_write_guest(kvm, gpa, data, len);
3513
3514 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3515 if (r)
3516 return -EFAULT;
3517 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3518
3519 return 0;
3520 }
3521 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3522
3523 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3524 void *data, unsigned long len)
3525 {
3526 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3527 }
3528 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3529
3530 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3531 void *data, unsigned int offset,
3532 unsigned long len)
3533 {
3534 struct kvm_memslots *slots = kvm_memslots(kvm);
3535 int r;
3536 gpa_t gpa = ghc->gpa + offset;
3537
3538 if (WARN_ON_ONCE(len + offset > ghc->len))
3539 return -EINVAL;
3540
3541 if (slots->generation != ghc->generation) {
3542 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3543 return -EFAULT;
3544 }
3545
3546 if (kvm_is_error_hva(ghc->hva))
3547 return -EFAULT;
3548
3549 if (unlikely(!ghc->memslot))
3550 return kvm_read_guest(kvm, gpa, data, len);
3551
3552 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3553 if (r)
3554 return -EFAULT;
3555
3556 return 0;
3557 }
3558 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3559
3560 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3561 void *data, unsigned long len)
3562 {
3563 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3564 }
3565 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3566
3567 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3568 {
3569 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3570 gfn_t gfn = gpa >> PAGE_SHIFT;
3571 int seg;
3572 int offset = offset_in_page(gpa);
3573 int ret;
3574
3575 while ((seg = next_segment(len, offset)) != 0) {
3576 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3577 if (ret < 0)
3578 return ret;
3579 offset = 0;
3580 len -= seg;
3581 ++gfn;
3582 }
3583 return 0;
3584 }
3585 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3586
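/*
 * Record @gfn as dirty in @memslot if dirty tracking is enabled: the entry is
 * pushed to the running vCPU's dirty ring when one is in use, otherwise the
 * corresponding bit is set in the memslot's dirty bitmap.
 */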
3587 void mark_page_dirty_in_slot(struct kvm *kvm,
3588 const struct kvm_memory_slot *memslot,
3589 gfn_t gfn)
3590 {
3591 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3592
3593 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3594 if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3595 return;
3596
3597 WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3598 #endif
3599
3600 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3601 unsigned long rel_gfn = gfn - memslot->base_gfn;
3602 u32 slot = (memslot->as_id << 16) | memslot->id;
3603
3604 if (kvm->dirty_ring_size && vcpu)
3605 kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3606 else if (memslot->dirty_bitmap)
3607 set_bit_le(rel_gfn, memslot->dirty_bitmap);
3608 }
3609 }
3610 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3611
3612 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3613 {
3614 struct kvm_memory_slot *memslot;
3615
3616 memslot = gfn_to_memslot(kvm, gfn);
3617 mark_page_dirty_in_slot(kvm, memslot, gfn);
3618 }
3619 EXPORT_SYMBOL_GPL(mark_page_dirty);
3620
3621 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3622 {
3623 struct kvm_memory_slot *memslot;
3624
3625 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3626 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3627 }
3628 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3629
3630 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3631 {
3632 if (!vcpu->sigset_active)
3633 return;
3634
3635 /*
3636 * This does a lockless modification of ->real_blocked, which is fine
3637 * because only current can change ->real_blocked, and all readers of
3638 * ->real_blocked don't care as long as ->real_blocked is always a subset
3639 * of ->blocked.
3640 */
3641 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3642 }
3643
3644 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3645 {
3646 if (!vcpu->sigset_active)
3647 return;
3648
3649 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3650 sigemptyset(&current->real_blocked);
3651 }
3652
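/*
 * Adaptive halt-polling: grow multiplies the per-vCPU poll window by
 * halt_poll_ns_grow (starting from halt_poll_ns_grow_start), shrink divides
 * it by halt_poll_ns_shrink. A grow value of zero disables growth; a shrink
 * value of zero resets the window to zero.
 */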
3653 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3654 {
3655 unsigned int old, val, grow, grow_start;
3656
3657 old = val = vcpu->halt_poll_ns;
3658 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3659 grow = READ_ONCE(halt_poll_ns_grow);
3660 if (!grow)
3661 goto out;
3662
3663 val *= grow;
3664 if (val < grow_start)
3665 val = grow_start;
3666
3667 vcpu->halt_poll_ns = val;
3668 out:
3669 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3670 }
3671
3672 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3673 {
3674 unsigned int old, val, shrink, grow_start;
3675
3676 old = val = vcpu->halt_poll_ns;
3677 shrink = READ_ONCE(halt_poll_ns_shrink);
3678 grow_start = READ_ONCE(halt_poll_ns_grow_start);
3679 if (shrink == 0)
3680 val = 0;
3681 else
3682 val /= shrink;
3683
3684 if (val < grow_start)
3685 val = 0;
3686
3687 vcpu->halt_poll_ns = val;
3688 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3689 }
3690
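/*
 * Returns 0 if the vCPU should keep blocking, or -EINTR if there is a reason
 * to stop: the vCPU became runnable, a timer or signal is pending, or a
 * KVM_REQ_UNBLOCK request was posted.
 */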
3691 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3692 {
3693 int ret = -EINTR;
3694 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3695
3696 if (kvm_arch_vcpu_runnable(vcpu))
3697 goto out;
3698 if (kvm_cpu_has_pending_timer(vcpu))
3699 goto out;
3700 if (signal_pending(current))
3701 goto out;
3702 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3703 goto out;
3704
3705 ret = 0;
3706 out:
3707 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3708 return ret;
3709 }
3710
3711 /*
3712 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3713 * pending. This is mostly used when halting a vCPU, but may also be used
3714 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3715 */
3716 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3717 {
3718 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3719 bool waited = false;
3720
3721 vcpu->stat.generic.blocking = 1;
3722
3723 preempt_disable();
3724 kvm_arch_vcpu_blocking(vcpu);
3725 prepare_to_rcuwait(wait);
3726 preempt_enable();
3727
3728 for (;;) {
3729 set_current_state(TASK_INTERRUPTIBLE);
3730
3731 if (kvm_vcpu_check_block(vcpu) < 0)
3732 break;
3733
3734 waited = true;
3735 schedule();
3736 }
3737
3738 preempt_disable();
3739 finish_rcuwait(wait);
3740 kvm_arch_vcpu_unblocking(vcpu);
3741 preempt_enable();
3742
3743 vcpu->stat.generic.blocking = 0;
3744
3745 return waited;
3746 }
3747
3748 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3749 ktime_t end, bool success)
3750 {
3751 struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3752 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3753
3754 ++vcpu->stat.generic.halt_attempted_poll;
3755
3756 if (success) {
3757 ++vcpu->stat.generic.halt_successful_poll;
3758
3759 if (!vcpu_valid_wakeup(vcpu))
3760 ++vcpu->stat.generic.halt_poll_invalid;
3761
3762 stats->halt_poll_success_ns += poll_ns;
3763 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3764 } else {
3765 stats->halt_poll_fail_ns += poll_ns;
3766 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3767 }
3768 }
3769
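/*
 * The per-VM limit set via KVM_CAP_HALT_POLL, when present, overrides the
 * module-wide halt_poll_ns parameter.
 */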
3770 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3771 {
3772 struct kvm *kvm = vcpu->kvm;
3773
3774 if (kvm->override_halt_poll_ns) {
3775 /*
3776 * Ensure kvm->max_halt_poll_ns is not read before
3777 * kvm->override_halt_poll_ns.
3778 *
3779 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3780 */
3781 smp_rmb();
3782 return READ_ONCE(kvm->max_halt_poll_ns);
3783 }
3784
3785 return READ_ONCE(halt_poll_ns);
3786 }
3787
3788 /*
3789 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
3790 * polling is enabled, busy wait for a short time before blocking to avoid the
3791 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3792 * is halted.
3793 */
3794 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3795 {
3796 unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3797 bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3798 ktime_t start, cur, poll_end;
3799 bool waited = false;
3800 bool do_halt_poll;
3801 u64 halt_ns;
3802
3803 if (vcpu->halt_poll_ns > max_halt_poll_ns)
3804 vcpu->halt_poll_ns = max_halt_poll_ns;
3805
3806 do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3807
3808 start = cur = poll_end = ktime_get();
3809 if (do_halt_poll) {
3810 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3811
3812 do {
3813 if (kvm_vcpu_check_block(vcpu) < 0)
3814 goto out;
3815 cpu_relax();
3816 poll_end = cur = ktime_get();
3817 } while (kvm_vcpu_can_poll(cur, stop));
3818 }
3819
3820 waited = kvm_vcpu_block(vcpu);
3821
3822 cur = ktime_get();
3823 if (waited) {
3824 vcpu->stat.generic.halt_wait_ns +=
3825 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3826 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3827 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3828 }
3829 out:
3830 /* The total time the vCPU was "halted", including polling time. */
3831 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3832
3833 /*
3834 * Note, halt-polling is considered successful so long as the vCPU was
3835 * never actually scheduled out, i.e. even if the wake event arrived
3836 * after the halt-polling loop itself, but before the full wait.
3837 */
3838 if (do_halt_poll)
3839 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3840
3841 if (halt_poll_allowed) {
3842 /* Recompute the max halt poll time in case it changed. */
3843 max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3844
3845 if (!vcpu_valid_wakeup(vcpu)) {
3846 shrink_halt_poll_ns(vcpu);
3847 } else if (max_halt_poll_ns) {
3848 if (halt_ns <= vcpu->halt_poll_ns)
3849 ; /* the halt fit within the poll window, keep halt_poll_ns as-is */
3850 /* we had a long block, shrink polling */
3851 else if (vcpu->halt_poll_ns &&
3852 halt_ns > max_halt_poll_ns)
3853 shrink_halt_poll_ns(vcpu);
3854 /* we had a short halt and our poll time is too small */
3855 else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3856 halt_ns < max_halt_poll_ns)
3857 grow_halt_poll_ns(vcpu);
3858 } else {
3859 vcpu->halt_poll_ns = 0;
3860 }
3861 }
3862
3863 trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3864 }
3865 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3866
3867 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3868 {
3869 if (__kvm_vcpu_wake_up(vcpu)) {
3870 WRITE_ONCE(vcpu->ready, true);
3871 ++vcpu->stat.generic.halt_wakeup;
3872 return true;
3873 }
3874
3875 return false;
3876 }
3877 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3878
3879 #ifndef CONFIG_S390
3880 /*
3881 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3882 */
3883 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3884 {
3885 int me, cpu;
3886
3887 if (kvm_vcpu_wake_up(vcpu))
3888 return;
3889
3890 me = get_cpu();
3891 /*
3892 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3893 * to EXITING_GUEST_MODE. Therefore the moderately expensive "should
3894 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3895 * within the vCPU thread itself.
3896 */
3897 if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3898 if (vcpu->mode == IN_GUEST_MODE)
3899 WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3900 goto out;
3901 }
3902
3903 /*
3904 * Note, the vCPU could get migrated to a different pCPU at any point
3905 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3906 * IPI to the previous pCPU. But, that's ok because the purpose of the
3907 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3908 * vCPU also requires it to leave IN_GUEST_MODE.
3909 */
3910 if (kvm_arch_vcpu_should_kick(vcpu)) {
3911 cpu = READ_ONCE(vcpu->cpu);
3912 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3913 smp_send_reschedule(cpu);
3914 }
3915 out:
3916 put_cpu();
3917 }
3918 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3919 #endif /* !CONFIG_S390 */
3920
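/*
 * Directed yield: hand the current physical CPU to the task backing @target.
 * Returns the result of yield_to(), i.e. a positive value if the yield
 * actually happened, zero or negative otherwise (including when the target
 * vCPU has no associated task).
 */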
3921 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3922 {
3923 struct pid *pid;
3924 struct task_struct *task = NULL;
3925 int ret = 0;
3926
3927 rcu_read_lock();
3928 pid = rcu_dereference(target->pid);
3929 if (pid)
3930 task = get_pid_task(pid, PIDTYPE_PID);
3931 rcu_read_unlock();
3932 if (!task)
3933 return ret;
3934 ret = yield_to(task, 1);
3935 put_task_struct(task);
3936
3937 return ret;
3938 }
3939 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3940
3941 /*
3942 * Helper that checks whether a VCPU is eligible for directed yield.
3943 * The most eligible candidate to yield to is chosen by the following heuristics:
3944 *
3945 * (a) A VCPU which has not done a PLE exit or had CPU relax intercepted recently
3946 * (a preempted lock holder), indicated by @in_spin_loop.
3947 * Set at the beginning and cleared at the end of the interception/PLE handler.
3948 *
3949 * (b) A VCPU which has done a PLE exit/CPU relax interception but did not get a
3950 * chance last time (it has most likely become eligible now, since we probably
3951 * yielded to the lock holder in the last iteration). This is done by toggling
3952 * @dy_eligible each time a VCPU is checked for eligibility.
3953 *
3954 * Yielding to a recently PLE-exited/CPU relax intercepted VCPU before yielding
3955 * to a preempted lock holder could result in wrong VCPU selection and CPU
3956 * burning. Giving priority to a potential lock holder increases lock
3957 * progress.
3958 *
3959 * Since the algorithm is based on heuristics, accessing another VCPU's data
3960 * without locking does no harm. It may result in trying to yield to the same
3961 * VCPU, failing, and continuing with the next VCPU, and so on.
3962 */
3963 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3964 {
3965 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3966 bool eligible;
3967
3968 eligible = !vcpu->spin_loop.in_spin_loop ||
3969 vcpu->spin_loop.dy_eligible;
3970
3971 if (vcpu->spin_loop.in_spin_loop)
3972 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3973
3974 return eligible;
3975 #else
3976 return true;
3977 #endif
3978 }
3979
3980 /*
3981 * Unlike kvm_arch_vcpu_runnable, this function is called outside
3982 * a vcpu_load/vcpu_put pair. However, for most architectures
3983 * kvm_arch_vcpu_runnable does not require vcpu_load.
3984 */
3985 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3986 {
3987 return kvm_arch_vcpu_runnable(vcpu);
3988 }
3989
3990 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3991 {
3992 if (kvm_arch_dy_runnable(vcpu))
3993 return true;
3994
3995 #ifdef CONFIG_KVM_ASYNC_PF
3996 if (!list_empty_careful(&vcpu->async_pf.done))
3997 return true;
3998 #endif
3999
4000 return false;
4001 }
4002
4003 /*
4004 * By default, simply query the target vCPU's current mode when checking if a
4005 * vCPU was preempted in kernel mode. All architectures except x86 (or more
4006 * specifically, except VMX) allow querying whether or not a vCPU is in kernel
4007 * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
4008 * directly for cross-vCPU checks is functionally correct and accurate.
4009 */
4010 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
4011 {
4012 return kvm_arch_vcpu_in_kernel(vcpu);
4013 }
4014
4015 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
4016 {
4017 return false;
4018 }
4019
4020 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4021 {
4022 struct kvm *kvm = me->kvm;
4023 struct kvm_vcpu *vcpu;
4024 int last_boosted_vcpu;
4025 unsigned long i;
4026 int yielded = 0;
4027 int try = 3;
4028 int pass;
4029
4030 last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
4031 kvm_vcpu_set_in_spin_loop(me, true);
4032 /*
4033 * We boost the priority of a VCPU that is runnable but not
4034 * currently running, because it got preempted by something
4035 * else and called schedule in __vcpu_run. Hopefully that
4036 * VCPU is holding the lock that we need and will release it.
4037 * We approximate round-robin by starting at the last boosted VCPU.
4038 */
4039 for (pass = 0; pass < 2 && !yielded && try; pass++) {
4040 kvm_for_each_vcpu(i, vcpu, kvm) {
4041 if (!pass && i <= last_boosted_vcpu) {
4042 i = last_boosted_vcpu;
4043 continue;
4044 } else if (pass && i > last_boosted_vcpu)
4045 break;
4046 if (!READ_ONCE(vcpu->ready))
4047 continue;
4048 if (vcpu == me)
4049 continue;
4050 if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4051 continue;
4052
4053 /*
4054 * Treat the target vCPU as being in-kernel if it has a
4055 * pending interrupt, as the vCPU trying to yield may
4056 * be spinning waiting on IPI delivery, i.e. the target
4057 * vCPU is in-kernel for the purposes of directed yield.
4058 */
4059 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4060 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4061 !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4062 continue;
4063 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4064 continue;
4065
4066 yielded = kvm_vcpu_yield_to(vcpu);
4067 if (yielded > 0) {
4068 WRITE_ONCE(kvm->last_boosted_vcpu, i);
4069 break;
4070 } else if (yielded < 0) {
4071 try--;
4072 if (!try)
4073 break;
4074 }
4075 }
4076 }
4077 kvm_vcpu_set_in_spin_loop(me, false);
4078
4079 /* Ensure vcpu is not eligible during next spinloop */
4080 kvm_vcpu_set_dy_eligible(me, false);
4081 }
4082 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
4083
4084 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4085 {
4086 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4087 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4088 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4089 kvm->dirty_ring_size / PAGE_SIZE);
4090 #else
4091 return false;
4092 #endif
4093 }
4094
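/*
 * Back the vCPU mmap region: page offset 0 maps the kvm_run structure,
 * followed by the architecture/config specific pages (x86 PIO data,
 * coalesced MMIO ring) and, when enabled, the dirty ring pages; anything
 * else is forwarded to kvm_arch_vcpu_fault().
 */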
4095 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4096 {
4097 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4098 struct page *page;
4099
4100 if (vmf->pgoff == 0)
4101 page = virt_to_page(vcpu->run);
4102 #ifdef CONFIG_X86
4103 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4104 page = virt_to_page(vcpu->arch.pio_data);
4105 #endif
4106 #ifdef CONFIG_KVM_MMIO
4107 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4108 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4109 #endif
4110 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4111 page = kvm_dirty_ring_get_page(
4112 &vcpu->dirty_ring,
4113 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4114 else
4115 return kvm_arch_vcpu_fault(vcpu, vmf);
4116 get_page(page);
4117 vmf->page = page;
4118 return 0;
4119 }
4120
4121 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4122 .fault = kvm_vcpu_fault,
4123 };
4124
4125 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4126 {
4127 struct kvm_vcpu *vcpu = file->private_data;
4128 unsigned long pages = vma_pages(vma);
4129
4130 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4131 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4132 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4133 return -EINVAL;
4134
4135 vma->vm_ops = &kvm_vcpu_vm_ops;
4136 return 0;
4137 }
4138
4139 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4140 {
4141 struct kvm_vcpu *vcpu = filp->private_data;
4142
4143 kvm_put_kvm(vcpu->kvm);
4144 return 0;
4145 }
4146
4147 static struct file_operations kvm_vcpu_fops = {
4148 .release = kvm_vcpu_release,
4149 .unlocked_ioctl = kvm_vcpu_ioctl,
4150 .mmap = kvm_vcpu_mmap,
4151 .llseek = noop_llseek,
4152 KVM_COMPAT(kvm_vcpu_compat_ioctl),
4153 };
4154
4155 /*
4156 * Allocates an inode for the vcpu.
4157 */
4158 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4159 {
4160 char name[8 + 1 + ITOA_MAX_LEN + 1];
4161
4162 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4163 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4164 }
4165
4166 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4167 static int vcpu_get_pid(void *data, u64 *val)
4168 {
4169 struct kvm_vcpu *vcpu = data;
4170
4171 rcu_read_lock();
4172 *val = pid_nr(rcu_dereference(vcpu->pid));
4173 rcu_read_unlock();
4174 return 0;
4175 }
4176
4177 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4178
4179 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4180 {
4181 struct dentry *debugfs_dentry;
4182 char dir_name[ITOA_MAX_LEN * 2];
4183
4184 if (!debugfs_initialized())
4185 return;
4186
4187 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4188 debugfs_dentry = debugfs_create_dir(dir_name,
4189 vcpu->kvm->debugfs_dentry);
4190 debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4191 &vcpu_get_pid_fops);
4192
4193 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4194 }
4195 #endif
4196
4197 /*
4198 * Creates some virtual cpus. Good luck creating more than one.
4199 */
4200 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
4201 {
4202 int r;
4203 struct kvm_vcpu *vcpu;
4204 struct page *page;
4205
4206 /*
4207 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
4208 * too-large values instead of silently truncating.
4209 *
4210 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
4211 * changing the storage type (at the very least, IDs should be tracked
4212 * as unsigned ints).
4213 */
4214 BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
4215 if (id >= KVM_MAX_VCPU_IDS)
4216 return -EINVAL;
4217
4218 mutex_lock(&kvm->lock);
4219 if (kvm->created_vcpus >= kvm->max_vcpus) {
4220 mutex_unlock(&kvm->lock);
4221 return -EINVAL;
4222 }
4223
4224 r = kvm_arch_vcpu_precreate(kvm, id);
4225 if (r) {
4226 mutex_unlock(&kvm->lock);
4227 return r;
4228 }
4229
4230 kvm->created_vcpus++;
4231 mutex_unlock(&kvm->lock);
4232
4233 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4234 if (!vcpu) {
4235 r = -ENOMEM;
4236 goto vcpu_decrement;
4237 }
4238
4239 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4240 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4241 if (!page) {
4242 r = -ENOMEM;
4243 goto vcpu_free;
4244 }
4245 vcpu->run = page_address(page);
4246
4247 kvm_vcpu_init(vcpu, kvm, id);
4248
4249 r = kvm_arch_vcpu_create(vcpu);
4250 if (r)
4251 goto vcpu_free_run_page;
4252
4253 if (kvm->dirty_ring_size) {
4254 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
4255 id, kvm->dirty_ring_size);
4256 if (r)
4257 goto arch_vcpu_destroy;
4258 }
4259
4260 mutex_lock(&kvm->lock);
4261
4262 #ifdef CONFIG_LOCKDEP
4263 /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
4264 mutex_lock(&vcpu->mutex);
4265 mutex_unlock(&vcpu->mutex);
4266 #endif
4267
4268 if (kvm_get_vcpu_by_id(kvm, id)) {
4269 r = -EEXIST;
4270 goto unlock_vcpu_destroy;
4271 }
4272
4273 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4274 r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4275 if (r)
4276 goto unlock_vcpu_destroy;
4277
4278 /* Now it's all set up, let userspace reach it */
4279 kvm_get_kvm(kvm);
4280 r = create_vcpu_fd(vcpu);
4281 if (r < 0)
4282 goto kvm_put_xa_release;
4283
4284 if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4285 r = -EINVAL;
4286 goto kvm_put_xa_release;
4287 }
4288
4289 /*
4290 * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu
4291 * pointer before kvm->online_vcpus' incremented value.
4292 */
4293 smp_wmb();
4294 atomic_inc(&kvm->online_vcpus);
4295
4296 mutex_unlock(&kvm->lock);
4297 kvm_arch_vcpu_postcreate(vcpu);
4298 kvm_create_vcpu_debugfs(vcpu);
4299 return r;
4300
4301 kvm_put_xa_release:
4302 kvm_put_kvm_no_destroy(kvm);
4303 xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4304 unlock_vcpu_destroy:
4305 mutex_unlock(&kvm->lock);
4306 kvm_dirty_ring_free(&vcpu->dirty_ring);
4307 arch_vcpu_destroy:
4308 kvm_arch_vcpu_destroy(vcpu);
4309 vcpu_free_run_page:
4310 free_page((unsigned long)vcpu->run);
4311 vcpu_free:
4312 kmem_cache_free(kvm_vcpu_cache, vcpu);
4313 vcpu_decrement:
4314 mutex_lock(&kvm->lock);
4315 kvm->created_vcpus--;
4316 mutex_unlock(&kvm->lock);
4317 return r;
4318 }
4319
4320 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4321 {
4322 if (sigset) {
4323 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4324 vcpu->sigset_active = 1;
4325 vcpu->sigset = *sigset;
4326 } else
4327 vcpu->sigset_active = 0;
4328 return 0;
4329 }
4330
4331 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4332 size_t size, loff_t *offset)
4333 {
4334 struct kvm_vcpu *vcpu = file->private_data;
4335
4336 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4337 &kvm_vcpu_stats_desc[0], &vcpu->stat,
4338 sizeof(vcpu->stat), user_buffer, size, offset);
4339 }
4340
4341 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4342 {
4343 struct kvm_vcpu *vcpu = file->private_data;
4344
4345 kvm_put_kvm(vcpu->kvm);
4346 return 0;
4347 }
4348
4349 static const struct file_operations kvm_vcpu_stats_fops = {
4350 .owner = THIS_MODULE,
4351 .read = kvm_vcpu_stats_read,
4352 .release = kvm_vcpu_stats_release,
4353 .llseek = noop_llseek,
4354 };
4355
4356 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4357 {
4358 int fd;
4359 struct file *file;
4360 char name[15 + ITOA_MAX_LEN + 1];
4361
4362 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4363
4364 fd = get_unused_fd_flags(O_CLOEXEC);
4365 if (fd < 0)
4366 return fd;
4367
4368 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4369 if (IS_ERR(file)) {
4370 put_unused_fd(fd);
4371 return PTR_ERR(file);
4372 }
4373
4374 kvm_get_kvm(vcpu->kvm);
4375
4376 file->f_mode |= FMODE_PREAD;
4377 fd_install(fd, file);
4378
4379 return fd;
4380 }
4381
4382 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4383 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4384 struct kvm_pre_fault_memory *range)
4385 {
4386 int idx;
4387 long r;
4388 u64 full_size;
4389
4390 if (range->flags)
4391 return -EINVAL;
4392
4393 if (!PAGE_ALIGNED(range->gpa) ||
4394 !PAGE_ALIGNED(range->size) ||
4395 range->gpa + range->size <= range->gpa)
4396 return -EINVAL;
4397
4398 vcpu_load(vcpu);
4399 idx = srcu_read_lock(&vcpu->kvm->srcu);
4400
4401 full_size = range->size;
4402 do {
4403 if (signal_pending(current)) {
4404 r = -EINTR;
4405 break;
4406 }
4407
4408 r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
4409 if (WARN_ON_ONCE(r == 0 || r == -EIO))
4410 break;
4411
4412 if (r < 0)
4413 break;
4414
4415 range->size -= r;
4416 range->gpa += r;
4417 cond_resched();
4418 } while (range->size);
4419
4420 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4421 vcpu_put(vcpu);
4422
4423 /* Return success if at least one page was mapped successfully. */
4424 return full_size == range->size ? r : 0;
4425 }
4426 #endif
4427
4428 static long kvm_vcpu_ioctl(struct file *filp,
4429 unsigned int ioctl, unsigned long arg)
4430 {
4431 struct kvm_vcpu *vcpu = filp->private_data;
4432 void __user *argp = (void __user *)arg;
4433 int r;
4434 struct kvm_fpu *fpu = NULL;
4435 struct kvm_sregs *kvm_sregs = NULL;
4436
4437 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4438 return -EIO;
4439
4440 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4441 return -EINVAL;
4442
4443 /*
4444 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4445 * execution; mutex_lock() would break them.
4446 */
4447 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4448 if (r != -ENOIOCTLCMD)
4449 return r;
4450
4451 if (mutex_lock_killable(&vcpu->mutex))
4452 return -EINTR;
4453 switch (ioctl) {
4454 case KVM_RUN: {
4455 struct pid *oldpid;
4456 r = -EINVAL;
4457 if (arg)
4458 goto out;
4459 oldpid = rcu_access_pointer(vcpu->pid);
4460 if (unlikely(oldpid != task_pid(current))) {
4461 /* The thread running this VCPU changed. */
4462 struct pid *newpid;
4463
4464 r = kvm_arch_vcpu_run_pid_change(vcpu);
4465 if (r)
4466 break;
4467
4468 newpid = get_task_pid(current, PIDTYPE_PID);
4469 rcu_assign_pointer(vcpu->pid, newpid);
4470 if (oldpid)
4471 synchronize_rcu();
4472 put_pid(oldpid);
4473 }
4474 vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
4475 r = kvm_arch_vcpu_ioctl_run(vcpu);
4476 vcpu->wants_to_run = false;
4477
4478 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4479 break;
4480 }
4481 case KVM_GET_REGS: {
4482 struct kvm_regs *kvm_regs;
4483
4484 r = -ENOMEM;
4485 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
4486 if (!kvm_regs)
4487 goto out;
4488 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4489 if (r)
4490 goto out_free1;
4491 r = -EFAULT;
4492 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4493 goto out_free1;
4494 r = 0;
4495 out_free1:
4496 kfree(kvm_regs);
4497 break;
4498 }
4499 case KVM_SET_REGS: {
4500 struct kvm_regs *kvm_regs;
4501
4502 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4503 if (IS_ERR(kvm_regs)) {
4504 r = PTR_ERR(kvm_regs);
4505 goto out;
4506 }
4507 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4508 kfree(kvm_regs);
4509 break;
4510 }
4511 case KVM_GET_SREGS: {
4512 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
4513 r = -ENOMEM;
4514 if (!kvm_sregs)
4515 goto out;
4516 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4517 if (r)
4518 goto out;
4519 r = -EFAULT;
4520 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4521 goto out;
4522 r = 0;
4523 break;
4524 }
4525 case KVM_SET_SREGS: {
4526 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4527 if (IS_ERR(kvm_sregs)) {
4528 r = PTR_ERR(kvm_sregs);
4529 kvm_sregs = NULL;
4530 goto out;
4531 }
4532 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4533 break;
4534 }
4535 case KVM_GET_MP_STATE: {
4536 struct kvm_mp_state mp_state;
4537
4538 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4539 if (r)
4540 goto out;
4541 r = -EFAULT;
4542 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4543 goto out;
4544 r = 0;
4545 break;
4546 }
4547 case KVM_SET_MP_STATE: {
4548 struct kvm_mp_state mp_state;
4549
4550 r = -EFAULT;
4551 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4552 goto out;
4553 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4554 break;
4555 }
4556 case KVM_TRANSLATE: {
4557 struct kvm_translation tr;
4558
4559 r = -EFAULT;
4560 if (copy_from_user(&tr, argp, sizeof(tr)))
4561 goto out;
4562 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4563 if (r)
4564 goto out;
4565 r = -EFAULT;
4566 if (copy_to_user(argp, &tr, sizeof(tr)))
4567 goto out;
4568 r = 0;
4569 break;
4570 }
4571 case KVM_SET_GUEST_DEBUG: {
4572 struct kvm_guest_debug dbg;
4573
4574 r = -EFAULT;
4575 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4576 goto out;
4577 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4578 break;
4579 }
4580 case KVM_SET_SIGNAL_MASK: {
4581 struct kvm_signal_mask __user *sigmask_arg = argp;
4582 struct kvm_signal_mask kvm_sigmask;
4583 sigset_t sigset, *p;
4584
4585 p = NULL;
4586 if (argp) {
4587 r = -EFAULT;
4588 if (copy_from_user(&kvm_sigmask, argp,
4589 sizeof(kvm_sigmask)))
4590 goto out;
4591 r = -EINVAL;
4592 if (kvm_sigmask.len != sizeof(sigset))
4593 goto out;
4594 r = -EFAULT;
4595 if (copy_from_user(&sigset, sigmask_arg->sigset,
4596 sizeof(sigset)))
4597 goto out;
4598 p = &sigset;
4599 }
4600 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4601 break;
4602 }
4603 case KVM_GET_FPU: {
4604 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
4605 r = -ENOMEM;
4606 if (!fpu)
4607 goto out;
4608 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4609 if (r)
4610 goto out;
4611 r = -EFAULT;
4612 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4613 goto out;
4614 r = 0;
4615 break;
4616 }
4617 case KVM_SET_FPU: {
4618 fpu = memdup_user(argp, sizeof(*fpu));
4619 if (IS_ERR(fpu)) {
4620 r = PTR_ERR(fpu);
4621 fpu = NULL;
4622 goto out;
4623 }
4624 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4625 break;
4626 }
4627 case KVM_GET_STATS_FD: {
4628 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4629 break;
4630 }
4631 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4632 case KVM_PRE_FAULT_MEMORY: {
4633 struct kvm_pre_fault_memory range;
4634
4635 r = -EFAULT;
4636 if (copy_from_user(&range, argp, sizeof(range)))
4637 break;
4638 r = kvm_vcpu_pre_fault_memory(vcpu, &range);
4639 /* Pass back leftover range. */
4640 if (copy_to_user(argp, &range, sizeof(range)))
4641 r = -EFAULT;
4642 break;
4643 }
4644 #endif
4645 default:
4646 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4647 }
4648 out:
4649 mutex_unlock(&vcpu->mutex);
4650 kfree(fpu);
4651 kfree(kvm_sregs);
4652 return r;
4653 }
4654
4655 #ifdef CONFIG_KVM_COMPAT
4656 static long kvm_vcpu_compat_ioctl(struct file *filp,
4657 unsigned int ioctl, unsigned long arg)
4658 {
4659 struct kvm_vcpu *vcpu = filp->private_data;
4660 void __user *argp = compat_ptr(arg);
4661 int r;
4662
4663 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4664 return -EIO;
4665
4666 switch (ioctl) {
4667 case KVM_SET_SIGNAL_MASK: {
4668 struct kvm_signal_mask __user *sigmask_arg = argp;
4669 struct kvm_signal_mask kvm_sigmask;
4670 sigset_t sigset;
4671
4672 if (argp) {
4673 r = -EFAULT;
4674 if (copy_from_user(&kvm_sigmask, argp,
4675 sizeof(kvm_sigmask)))
4676 goto out;
4677 r = -EINVAL;
4678 if (kvm_sigmask.len != sizeof(compat_sigset_t))
4679 goto out;
4680 r = -EFAULT;
4681 if (get_compat_sigset(&sigset,
4682 (compat_sigset_t __user *)sigmask_arg->sigset))
4683 goto out;
4684 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4685 } else
4686 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4687 break;
4688 }
4689 default:
4690 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4691 }
4692
4693 out:
4694 return r;
4695 }
4696 #endif
4697
4698 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4699 {
4700 struct kvm_device *dev = filp->private_data;
4701
4702 if (dev->ops->mmap)
4703 return dev->ops->mmap(dev, vma);
4704
4705 return -ENODEV;
4706 }
4707
4708 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4709 int (*accessor)(struct kvm_device *dev,
4710 struct kvm_device_attr *attr),
4711 unsigned long arg)
4712 {
4713 struct kvm_device_attr attr;
4714
4715 if (!accessor)
4716 return -EPERM;
4717
4718 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4719 return -EFAULT;
4720
4721 return accessor(dev, &attr);
4722 }
4723
4724 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4725 unsigned long arg)
4726 {
4727 struct kvm_device *dev = filp->private_data;
4728
4729 if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4730 return -EIO;
4731
4732 switch (ioctl) {
4733 case KVM_SET_DEVICE_ATTR:
4734 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4735 case KVM_GET_DEVICE_ATTR:
4736 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4737 case KVM_HAS_DEVICE_ATTR:
4738 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4739 default:
4740 if (dev->ops->ioctl)
4741 return dev->ops->ioctl(dev, ioctl, arg);
4742
4743 return -ENOTTY;
4744 }
4745 }
4746
4747 static int kvm_device_release(struct inode *inode, struct file *filp)
4748 {
4749 struct kvm_device *dev = filp->private_data;
4750 struct kvm *kvm = dev->kvm;
4751
4752 if (dev->ops->release) {
4753 mutex_lock(&kvm->lock);
4754 list_del_rcu(&dev->vm_node);
4755 synchronize_rcu();
4756 dev->ops->release(dev);
4757 mutex_unlock(&kvm->lock);
4758 }
4759
4760 kvm_put_kvm(kvm);
4761 return 0;
4762 }
4763
4764 static struct file_operations kvm_device_fops = {
4765 .unlocked_ioctl = kvm_device_ioctl,
4766 .release = kvm_device_release,
4767 KVM_COMPAT(kvm_device_ioctl),
4768 .mmap = kvm_device_mmap,
4769 };
4770
4771 struct kvm_device *kvm_device_from_filp(struct file *filp)
4772 {
4773 if (filp->f_op != &kvm_device_fops)
4774 return NULL;
4775
4776 return filp->private_data;
4777 }
4778
4779 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4780 #ifdef CONFIG_KVM_MPIC
4781 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4782 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
4783 #endif
4784 };
4785
4786 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4787 {
4788 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4789 return -ENOSPC;
4790
4791 if (kvm_device_ops_table[type] != NULL)
4792 return -EEXIST;
4793
4794 kvm_device_ops_table[type] = ops;
4795 return 0;
4796 }
4797
4798 void kvm_unregister_device_ops(u32 type)
4799 {
4800 if (kvm_device_ops_table[type] != NULL)
4801 kvm_device_ops_table[type] = NULL;
4802 }
4803
4804 static int kvm_ioctl_create_device(struct kvm *kvm,
4805 struct kvm_create_device *cd)
4806 {
4807 const struct kvm_device_ops *ops;
4808 struct kvm_device *dev;
4809 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4810 int type;
4811 int ret;
4812
4813 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4814 return -ENODEV;
4815
4816 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4817 ops = kvm_device_ops_table[type];
4818 if (ops == NULL)
4819 return -ENODEV;
4820
4821 if (test)
4822 return 0;
4823
4824 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4825 if (!dev)
4826 return -ENOMEM;
4827
4828 dev->ops = ops;
4829 dev->kvm = kvm;
4830
4831 mutex_lock(&kvm->lock);
4832 ret = ops->create(dev, type);
4833 if (ret < 0) {
4834 mutex_unlock(&kvm->lock);
4835 kfree(dev);
4836 return ret;
4837 }
4838 list_add_rcu(&dev->vm_node, &kvm->devices);
4839 mutex_unlock(&kvm->lock);
4840
4841 if (ops->init)
4842 ops->init(dev);
4843
4844 kvm_get_kvm(kvm);
4845 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4846 if (ret < 0) {
4847 kvm_put_kvm_no_destroy(kvm);
4848 mutex_lock(&kvm->lock);
4849 list_del_rcu(&dev->vm_node);
4850 synchronize_rcu();
4851 if (ops->release)
4852 ops->release(dev);
4853 mutex_unlock(&kvm->lock);
4854 if (ops->destroy)
4855 ops->destroy(dev);
4856 return ret;
4857 }
4858
4859 cd->fd = ret;
4860 return 0;
4861 }
4862
4863 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4864 {
4865 switch (arg) {
4866 case KVM_CAP_USER_MEMORY:
4867 case KVM_CAP_USER_MEMORY2:
4868 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4869 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4870 case KVM_CAP_INTERNAL_ERROR_DATA:
4871 #ifdef CONFIG_HAVE_KVM_MSI
4872 case KVM_CAP_SIGNAL_MSI:
4873 #endif
4874 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4875 case KVM_CAP_IRQFD:
4876 #endif
4877 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4878 case KVM_CAP_CHECK_EXTENSION_VM:
4879 case KVM_CAP_ENABLE_CAP_VM:
4880 case KVM_CAP_HALT_POLL:
4881 return 1;
4882 #ifdef CONFIG_KVM_MMIO
4883 case KVM_CAP_COALESCED_MMIO:
4884 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4885 case KVM_CAP_COALESCED_PIO:
4886 return 1;
4887 #endif
4888 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4889 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4890 return KVM_DIRTY_LOG_MANUAL_CAPS;
4891 #endif
4892 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4893 case KVM_CAP_IRQ_ROUTING:
4894 return KVM_MAX_IRQ_ROUTES;
4895 #endif
4896 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4897 case KVM_CAP_MULTI_ADDRESS_SPACE:
4898 if (kvm)
4899 return kvm_arch_nr_memslot_as_ids(kvm);
4900 return KVM_MAX_NR_ADDRESS_SPACES;
4901 #endif
4902 case KVM_CAP_NR_MEMSLOTS:
4903 return KVM_USER_MEM_SLOTS;
4904 case KVM_CAP_DIRTY_LOG_RING:
4905 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4906 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4907 #else
4908 return 0;
4909 #endif
4910 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4911 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4912 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4913 #else
4914 return 0;
4915 #endif
4916 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4917 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4918 #endif
4919 case KVM_CAP_BINARY_STATS_FD:
4920 case KVM_CAP_SYSTEM_EVENT_DATA:
4921 case KVM_CAP_DEVICE_CTRL:
4922 return 1;
4923 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4924 case KVM_CAP_MEMORY_ATTRIBUTES:
4925 return kvm_supported_mem_attributes(kvm);
4926 #endif
4927 #ifdef CONFIG_KVM_PRIVATE_MEM
4928 case KVM_CAP_GUEST_MEMFD:
4929 return !kvm || kvm_arch_has_private_mem(kvm);
4930 #endif
4931 default:
4932 break;
4933 }
4934 return kvm_vm_ioctl_check_extension(kvm, arg);
4935 }
4936
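/*
 * Validate and record the dirty ring size requested by userspace: it must be
 * a power of two, at least one page and large enough for the reserved
 * entries, no larger than KVM_DIRTY_RING_MAX_ENTRIES worth of entries, and
 * can only be set once, before any vCPU has been created.
 */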
4937 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4938 {
4939 int r;
4940
4941 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4942 return -EINVAL;
4943
4944 /* The size should be a power of 2 */
4945 if (!size || (size & (size - 1)))
4946 return -EINVAL;
4947
4948 /* Must be large enough to hold the reserved entries and at least one page */
4949 if (size < kvm_dirty_ring_get_rsvd_entries() *
4950 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4951 return -EINVAL;
4952
4953 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4954 sizeof(struct kvm_dirty_gfn))
4955 return -E2BIG;
4956
4957 /* We only allow it to be set once */
4958 if (kvm->dirty_ring_size)
4959 return -EINVAL;
4960
4961 mutex_lock(&kvm->lock);
4962
4963 if (kvm->created_vcpus) {
4964 /* We don't allow changing this value after vCPUs have been created */
4965 r = -EINVAL;
4966 } else {
4967 kvm->dirty_ring_size = size;
4968 r = 0;
4969 }
4970
4971 mutex_unlock(&kvm->lock);
4972 return r;
4973 }
4974
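/*
 * Reset the dirty rings of all vCPUs and flush the remote TLBs if any
 * entries were harvested; returns the total number of reset entries.
 */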
4975 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4976 {
4977 unsigned long i;
4978 struct kvm_vcpu *vcpu;
4979 int cleared = 0;
4980
4981 if (!kvm->dirty_ring_size)
4982 return -EINVAL;
4983
4984 mutex_lock(&kvm->slots_lock);
4985
4986 kvm_for_each_vcpu(i, vcpu, kvm)
4987 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4988
4989 mutex_unlock(&kvm->slots_lock);
4990
4991 if (cleared)
4992 kvm_flush_remote_tlbs(kvm);
4993
4994 return cleared;
4995 }
4996
4997 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4998 struct kvm_enable_cap *cap)
4999 {
5000 return -EINVAL;
5001 }
5002
5003 bool kvm_are_all_memslots_empty(struct kvm *kvm)
5004 {
5005 int i;
5006
5007 lockdep_assert_held(&kvm->slots_lock);
5008
5009 for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
5010 if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
5011 return false;
5012 }
5013
5014 return true;
5015 }
5016 EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
5017
5018 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
5019 struct kvm_enable_cap *cap)
5020 {
5021 switch (cap->cap) {
5022 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5023 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
5024 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
5025
5026 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
5027 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
5028
5029 if (cap->flags || (cap->args[0] & ~allowed_options))
5030 return -EINVAL;
5031 kvm->manual_dirty_log_protect = cap->args[0];
5032 return 0;
5033 }
5034 #endif
5035 case KVM_CAP_HALT_POLL: {
5036 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5037 return -EINVAL;
5038
5039 kvm->max_halt_poll_ns = cap->args[0];
5040
5041 /*
5042 * Ensure kvm->override_halt_poll_ns does not become visible
5043 * before kvm->max_halt_poll_ns.
5044 *
5045 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5046 */
5047 smp_wmb();
5048 kvm->override_halt_poll_ns = true;
5049
5050 return 0;
5051 }
5052 case KVM_CAP_DIRTY_LOG_RING:
5053 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5054 if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5055 return -EINVAL;
5056
5057 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5058 case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5059 int r = -EINVAL;
5060
5061 if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5062 !kvm->dirty_ring_size || cap->flags)
5063 return r;
5064
5065 mutex_lock(&kvm->slots_lock);
5066
5067 /*
5068 * For simplicity, allow enabling ring+bitmap if and only if
5069 * there are no memslots, e.g. to ensure all memslots allocate
5070 * a bitmap after the capability is enabled.
5071 */
5072 if (kvm_are_all_memslots_empty(kvm)) {
5073 kvm->dirty_ring_with_bitmap = true;
5074 r = 0;
5075 }
5076
5077 mutex_unlock(&kvm->slots_lock);
5078
5079 return r;
5080 }
5081 default:
5082 return kvm_vm_ioctl_enable_cap(kvm, cap);
5083 }
5084 }
5085
5086 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5087 size_t size, loff_t *offset)
5088 {
5089 struct kvm *kvm = file->private_data;
5090
5091 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5092 &kvm_vm_stats_desc[0], &kvm->stat,
5093 sizeof(kvm->stat), user_buffer, size, offset);
5094 }
5095
5096 static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5097 {
5098 struct kvm *kvm = file->private_data;
5099
5100 kvm_put_kvm(kvm);
5101 return 0;
5102 }
5103
5104 static const struct file_operations kvm_vm_stats_fops = {
5105 .owner = THIS_MODULE,
5106 .read = kvm_vm_stats_read,
5107 .release = kvm_vm_stats_release,
5108 .llseek = noop_llseek,
5109 };
5110
5111 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5112 {
5113 int fd;
5114 struct file *file;
5115
5116 fd = get_unused_fd_flags(O_CLOEXEC);
5117 if (fd < 0)
5118 return fd;
5119
5120 file = anon_inode_getfile("kvm-vm-stats",
5121 &kvm_vm_stats_fops, kvm, O_RDONLY);
5122 if (IS_ERR(file)) {
5123 put_unused_fd(fd);
5124 return PTR_ERR(file);
5125 }
5126
5127 kvm_get_kvm(kvm);
5128
5129 file->f_mode |= FMODE_PREAD;
5130 fd_install(fd, file);
5131
5132 return fd;
5133 }
5134
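/*
 * KVM_SET_USER_MEMORY_REGION(2): the legacy and v2 structs share a common
 * prefix, so the v1 ioctl is handled by copying into the v2 struct. The
 * macro below build-asserts that the shared fields really do have identical
 * offsets and sizes in both layouts.
 */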
5135 #define SANITY_CHECK_MEM_REGION_FIELD(field) \
5136 do { \
5137 BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) != \
5138 offsetof(struct kvm_userspace_memory_region2, field)); \
5139 BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) != \
5140 sizeof_field(struct kvm_userspace_memory_region2, field)); \
5141 } while (0)
5142
5143 static long kvm_vm_ioctl(struct file *filp,
5144 unsigned int ioctl, unsigned long arg)
5145 {
5146 struct kvm *kvm = filp->private_data;
5147 void __user *argp = (void __user *)arg;
5148 int r;
5149
5150 if (kvm->mm != current->mm || kvm->vm_dead)
5151 return -EIO;
5152 switch (ioctl) {
5153 case KVM_CREATE_VCPU:
5154 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5155 break;
5156 case KVM_ENABLE_CAP: {
5157 struct kvm_enable_cap cap;
5158
5159 r = -EFAULT;
5160 if (copy_from_user(&cap, argp, sizeof(cap)))
5161 goto out;
5162 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5163 break;
5164 }
5165 case KVM_SET_USER_MEMORY_REGION2:
5166 case KVM_SET_USER_MEMORY_REGION: {
5167 struct kvm_userspace_memory_region2 mem;
5168 unsigned long size;
5169
5170 if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5171 /*
5172 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5173 * accessed, but avoid leaking kernel memory in case of a bug.
5174 */
5175 memset(&mem, 0, sizeof(mem));
5176 size = sizeof(struct kvm_userspace_memory_region);
5177 } else {
5178 size = sizeof(struct kvm_userspace_memory_region2);
5179 }
5180
5181 /* Ensure the common parts of the two structs are identical. */
5182 SANITY_CHECK_MEM_REGION_FIELD(slot);
5183 SANITY_CHECK_MEM_REGION_FIELD(flags);
5184 SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5185 SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5186 SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5187
5188 r = -EFAULT;
5189 if (copy_from_user(&mem, argp, size))
5190 goto out;
5191
5192 r = -EINVAL;
5193 if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5194 (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5195 goto out;
5196
5197 r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5198 break;
5199 }
5200 case KVM_GET_DIRTY_LOG: {
5201 struct kvm_dirty_log log;
5202
5203 r = -EFAULT;
5204 if (copy_from_user(&log, argp, sizeof(log)))
5205 goto out;
5206 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5207 break;
5208 }
5209 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5210 case KVM_CLEAR_DIRTY_LOG: {
5211 struct kvm_clear_dirty_log log;
5212
5213 r = -EFAULT;
5214 if (copy_from_user(&log, argp, sizeof(log)))
5215 goto out;
5216 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5217 break;
5218 }
5219 #endif
5220 #ifdef CONFIG_KVM_MMIO
5221 case KVM_REGISTER_COALESCED_MMIO: {
5222 struct kvm_coalesced_mmio_zone zone;
5223
5224 r = -EFAULT;
5225 if (copy_from_user(&zone, argp, sizeof(zone)))
5226 goto out;
5227 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5228 break;
5229 }
5230 case KVM_UNREGISTER_COALESCED_MMIO: {
5231 struct kvm_coalesced_mmio_zone zone;
5232
5233 r = -EFAULT;
5234 if (copy_from_user(&zone, argp, sizeof(zone)))
5235 goto out;
5236 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5237 break;
5238 }
5239 #endif
5240 case KVM_IRQFD: {
5241 struct kvm_irqfd data;
5242
5243 r = -EFAULT;
5244 if (copy_from_user(&data, argp, sizeof(data)))
5245 goto out;
5246 r = kvm_irqfd(kvm, &data);
5247 break;
5248 }
5249 case KVM_IOEVENTFD: {
5250 struct kvm_ioeventfd data;
5251
5252 r = -EFAULT;
5253 if (copy_from_user(&data, argp, sizeof(data)))
5254 goto out;
5255 r = kvm_ioeventfd(kvm, &data);
5256 break;
5257 }
5258 #ifdef CONFIG_HAVE_KVM_MSI
5259 case KVM_SIGNAL_MSI: {
5260 struct kvm_msi msi;
5261
5262 r = -EFAULT;
5263 if (copy_from_user(&msi, argp, sizeof(msi)))
5264 goto out;
5265 r = kvm_send_userspace_msi(kvm, &msi);
5266 break;
5267 }
5268 #endif
5269 #ifdef __KVM_HAVE_IRQ_LINE
5270 case KVM_IRQ_LINE_STATUS:
5271 case KVM_IRQ_LINE: {
5272 struct kvm_irq_level irq_event;
5273
5274 r = -EFAULT;
5275 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5276 goto out;
5277
5278 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5279 ioctl == KVM_IRQ_LINE_STATUS);
5280 if (r)
5281 goto out;
5282
5283 r = -EFAULT;
5284 if (ioctl == KVM_IRQ_LINE_STATUS) {
5285 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5286 goto out;
5287 }
5288
5289 r = 0;
5290 break;
5291 }
5292 #endif
5293 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5294 case KVM_SET_GSI_ROUTING: {
5295 struct kvm_irq_routing routing;
5296 struct kvm_irq_routing __user *urouting;
5297 struct kvm_irq_routing_entry *entries = NULL;
5298
5299 r = -EFAULT;
5300 if (copy_from_user(&routing, argp, sizeof(routing)))
5301 goto out;
5302 r = -EINVAL;
5303 if (!kvm_arch_can_set_irq_routing(kvm))
5304 goto out;
5305 if (routing.nr > KVM_MAX_IRQ_ROUTES)
5306 goto out;
5307 if (routing.flags)
5308 goto out;
5309 if (routing.nr) {
5310 urouting = argp;
5311 entries = vmemdup_array_user(urouting->entries,
5312 routing.nr, sizeof(*entries));
5313 if (IS_ERR(entries)) {
5314 r = PTR_ERR(entries);
5315 goto out;
5316 }
5317 }
5318 r = kvm_set_irq_routing(kvm, entries, routing.nr,
5319 routing.flags);
5320 kvfree(entries);
5321 break;
5322 }
5323 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5324 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5325 case KVM_SET_MEMORY_ATTRIBUTES: {
5326 struct kvm_memory_attributes attrs;
5327
5328 r = -EFAULT;
5329 if (copy_from_user(&attrs, argp, sizeof(attrs)))
5330 goto out;
5331
5332 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5333 break;
5334 }
5335 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5336 case KVM_CREATE_DEVICE: {
5337 struct kvm_create_device cd;
5338
5339 r = -EFAULT;
5340 if (copy_from_user(&cd, argp, sizeof(cd)))
5341 goto out;
5342
5343 r = kvm_ioctl_create_device(kvm, &cd);
5344 if (r)
5345 goto out;
5346
5347 r = -EFAULT;
5348 if (copy_to_user(argp, &cd, sizeof(cd)))
5349 goto out;
5350
5351 r = 0;
5352 break;
5353 }
5354 case KVM_CHECK_EXTENSION:
5355 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5356 break;
5357 case KVM_RESET_DIRTY_RINGS:
5358 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5359 break;
5360 case KVM_GET_STATS_FD:
5361 r = kvm_vm_ioctl_get_stats_fd(kvm);
5362 break;
5363 #ifdef CONFIG_KVM_PRIVATE_MEM
5364 case KVM_CREATE_GUEST_MEMFD: {
5365 struct kvm_create_guest_memfd guest_memfd;
5366
5367 r = -EFAULT;
5368 if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5369 goto out;
5370
5371 r = kvm_gmem_create(kvm, &guest_memfd);
5372 break;
5373 }
5374 #endif
5375 default:
5376 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5377 }
5378 out:
5379 return r;
5380 }
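/*
 * Illustrative userspace counterpart to the KVM_SET_USER_MEMORY_REGION case
 * above (a sketch, not kernel code; vm_fd, host_mem and mem_size are
 * placeholder names, error handling omitted):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot			= 0,
 *		.flags			= 0,
 *		.guest_phys_addr	= 0,
 *		.memory_size		= mem_size,
 *		.userspace_addr		= (__u64)host_mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */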
5381
5382 #ifdef CONFIG_KVM_COMPAT
5383 struct compat_kvm_dirty_log {
5384 __u32 slot;
5385 __u32 padding1;
5386 union {
5387 compat_uptr_t dirty_bitmap; /* one bit per page */
5388 __u64 padding2;
5389 };
5390 };
5391
5392 struct compat_kvm_clear_dirty_log {
5393 __u32 slot;
5394 __u32 num_pages;
5395 __u64 first_page;
5396 union {
5397 compat_uptr_t dirty_bitmap; /* one bit per page */
5398 __u64 padding2;
5399 };
5400 };
5401
5402 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5403 unsigned long arg)
5404 {
5405 return -ENOTTY;
5406 }
5407
5408 static long kvm_vm_compat_ioctl(struct file *filp,
5409 unsigned int ioctl, unsigned long arg)
5410 {
5411 struct kvm *kvm = filp->private_data;
5412 int r;
5413
5414 if (kvm->mm != current->mm || kvm->vm_dead)
5415 return -EIO;
5416
5417 r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5418 if (r != -ENOTTY)
5419 return r;
5420
5421 switch (ioctl) {
5422 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5423 case KVM_CLEAR_DIRTY_LOG: {
5424 struct compat_kvm_clear_dirty_log compat_log;
5425 struct kvm_clear_dirty_log log;
5426
5427 if (copy_from_user(&compat_log, (void __user *)arg,
5428 sizeof(compat_log)))
5429 return -EFAULT;
5430 log.slot = compat_log.slot;
5431 log.num_pages = compat_log.num_pages;
5432 log.first_page = compat_log.first_page;
5433 log.padding2 = compat_log.padding2;
5434 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5435
5436 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5437 break;
5438 }
5439 #endif
5440 case KVM_GET_DIRTY_LOG: {
5441 struct compat_kvm_dirty_log compat_log;
5442 struct kvm_dirty_log log;
5443
5444 if (copy_from_user(&compat_log, (void __user *)arg,
5445 sizeof(compat_log)))
5446 return -EFAULT;
5447 log.slot = compat_log.slot;
5448 log.padding1 = compat_log.padding1;
5449 log.padding2 = compat_log.padding2;
5450 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5451
5452 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5453 break;
5454 }
5455 default:
5456 r = kvm_vm_ioctl(filp, ioctl, arg);
5457 }
5458 return r;
5459 }
5460 #endif
5461
5462 static struct file_operations kvm_vm_fops = {
5463 .release = kvm_vm_release,
5464 .unlocked_ioctl = kvm_vm_ioctl,
5465 .llseek = noop_llseek,
5466 KVM_COMPAT(kvm_vm_compat_ioctl),
5467 };
5468
5469 bool file_is_kvm(struct file *file)
5470 {
5471 return file && file->f_op == &kvm_vm_fops;
5472 }
5473 EXPORT_SYMBOL_GPL(file_is_kvm);
5474
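/*
 * Create a new VM and return a file descriptor for it.  The fd is reserved
 * first so its number can be handed to kvm_create_vm() (it is used to name
 * the per-VM debugfs directory); once the anon inode file is installed, the
 * fd owns the sole reference and the VM is torn down via kvm_vm_release()
 * on the final fput().
 */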
5475 static int kvm_dev_ioctl_create_vm(unsigned long type)
5476 {
5477 char fdname[ITOA_MAX_LEN + 1];
5478 int r, fd;
5479 struct kvm *kvm;
5480 struct file *file;
5481
5482 fd = get_unused_fd_flags(O_CLOEXEC);
5483 if (fd < 0)
5484 return fd;
5485
5486 snprintf(fdname, sizeof(fdname), "%d", fd);
5487
5488 kvm = kvm_create_vm(type, fdname);
5489 if (IS_ERR(kvm)) {
5490 r = PTR_ERR(kvm);
5491 goto put_fd;
5492 }
5493
5494 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5495 if (IS_ERR(file)) {
5496 r = PTR_ERR(file);
5497 goto put_kvm;
5498 }
5499
5500 /*
5501 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5502 * already set, with ->release() being kvm_vm_release(). In error
5503 * cases it will be called by the final fput(file) and will take
5504 * care of doing kvm_put_kvm(kvm).
5505 */
5506 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5507
5508 fd_install(fd, file);
5509 return fd;
5510
5511 put_kvm:
5512 kvm_put_kvm(kvm);
5513 put_fd:
5514 put_unused_fd(fd);
5515 return r;
5516 }
5517
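/*
 * ioctls on /dev/kvm itself: querying the API version and extensions,
 * creating VMs, and reporting the vcpu mmap size (one kvm_run page plus
 * optional pio and coalesced MMIO pages).  Everything else is forwarded to
 * kvm_arch_dev_ioctl().
 */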
5518 static long kvm_dev_ioctl(struct file *filp,
5519 unsigned int ioctl, unsigned long arg)
5520 {
5521 int r = -EINVAL;
5522
5523 switch (ioctl) {
5524 case KVM_GET_API_VERSION:
5525 if (arg)
5526 goto out;
5527 r = KVM_API_VERSION;
5528 break;
5529 case KVM_CREATE_VM:
5530 r = kvm_dev_ioctl_create_vm(arg);
5531 break;
5532 case KVM_CHECK_EXTENSION:
5533 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5534 break;
5535 case KVM_GET_VCPU_MMAP_SIZE:
5536 if (arg)
5537 goto out;
5538 r = PAGE_SIZE; /* struct kvm_run */
5539 #ifdef CONFIG_X86
5540 r += PAGE_SIZE; /* pio data page */
5541 #endif
5542 #ifdef CONFIG_KVM_MMIO
5543 r += PAGE_SIZE; /* coalesced mmio ring page */
5544 #endif
5545 break;
5546 default:
5547 return kvm_arch_dev_ioctl(filp, ioctl, arg);
5548 }
5549 out:
5550 return r;
5551 }
5552
5553 static struct file_operations kvm_chardev_ops = {
5554 .unlocked_ioctl = kvm_dev_ioctl,
5555 .llseek = noop_llseek,
5556 KVM_COMPAT(kvm_dev_ioctl),
5557 };
5558
5559 static struct miscdevice kvm_dev = {
5560 KVM_MINOR,
5561 "kvm",
5562 &kvm_chardev_ops,
5563 };
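/*
 * Illustrative userspace usage of the character device registered above
 * (a sketch, not kernel code; error handling omitted):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	size_t run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */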
5564
5565 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5566 static bool enable_virt_at_load = true;
5567 module_param(enable_virt_at_load, bool, 0444);
5568
5569 __visible bool kvm_rebooting;
5570 EXPORT_SYMBOL_GPL(kvm_rebooting);
5571
5572 static DEFINE_PER_CPU(bool, virtualization_enabled);
5573 static DEFINE_MUTEX(kvm_usage_lock);
5574 static int kvm_usage_count;
5575
5576 __weak void kvm_arch_enable_virtualization(void)
5577 {
5578
5579 }
5580
5581 __weak void kvm_arch_disable_virtualization(void)
5582 {
5583
5584 }
5585
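/*
 * Enable hardware virtualization on the current CPU.  The per-CPU
 * virtualization_enabled flag makes this idempotent, so it is safe to call
 * from both the CPU hotplug online callback and the syscore resume path.
 */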
5586 static int kvm_enable_virtualization_cpu(void)
5587 {
5588 if (__this_cpu_read(virtualization_enabled))
5589 return 0;
5590
5591 if (kvm_arch_enable_virtualization_cpu()) {
5592 pr_info("kvm: enabling virtualization on CPU%d failed\n",
5593 raw_smp_processor_id());
5594 return -EIO;
5595 }
5596
5597 __this_cpu_write(virtualization_enabled, true);
5598 return 0;
5599 }
5600
5601 static int kvm_online_cpu(unsigned int cpu)
5602 {
5603 /*
5604 * Abort the CPU online process if hardware virtualization cannot
5605 * be enabled. Otherwise running VMs would encounter unrecoverable
5606 * errors when scheduled to this CPU.
5607 */
5608 return kvm_enable_virtualization_cpu();
5609 }
5610
5611 static void kvm_disable_virtualization_cpu(void *ign)
5612 {
5613 if (!__this_cpu_read(virtualization_enabled))
5614 return;
5615
5616 kvm_arch_disable_virtualization_cpu();
5617
5618 __this_cpu_write(virtualization_enabled, false);
5619 }
5620
5621 static int kvm_offline_cpu(unsigned int cpu)
5622 {
5623 kvm_disable_virtualization_cpu(NULL);
5624 return 0;
5625 }
5626
5627 static void kvm_shutdown(void)
5628 {
5629 /*
5630 * Disable hardware virtualization and set kvm_rebooting to indicate
5631 * that KVM has asynchronously disabled hardware virtualization, i.e.
5632 * that relevant errors and exceptions aren't entirely unexpected.
5633 * Some flavors of hardware virtualization need to be disabled before
5634 * transferring control to firmware (to perform shutdown/reboot), e.g.
5635 * on x86, virtualization can block INIT interrupts, which are used by
5636 * firmware to pull APs back under firmware control. Note, this path
5637 * is used for both shutdown and reboot scenarios, i.e. neither name is
5638 * 100% comprehensive.
5639 */
5640 pr_info("kvm: exiting hardware virtualization\n");
5641 kvm_rebooting = true;
5642 on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
5643 }
5644
5645 static int kvm_suspend(void)
5646 {
5647 /*
5648 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5649 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
5650 * count is stable. Assert that kvm_usage_lock is not held to ensure
5651 * the system isn't suspended while KVM is enabling hardware. Hardware
5652 * enabling can be preempted, but the task cannot be frozen until it has
5653 * dropped all locks (userspace tasks are frozen via a fake signal).
5654 */
5655 lockdep_assert_not_held(&kvm_usage_lock);
5656 lockdep_assert_irqs_disabled();
5657
5658 kvm_disable_virtualization_cpu(NULL);
5659 return 0;
5660 }
5661
5662 static void kvm_resume(void)
5663 {
5664 lockdep_assert_not_held(&kvm_usage_lock);
5665 lockdep_assert_irqs_disabled();
5666
5667 WARN_ON_ONCE(kvm_enable_virtualization_cpu());
5668 }
5669
5670 static struct syscore_ops kvm_syscore_ops = {
5671 .suspend = kvm_suspend,
5672 .resume = kvm_resume,
5673 .shutdown = kvm_shutdown,
5674 };
5675
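/*
 * Reference-counted global enable: the first user brings virtualization up
 * on all online CPUs via the cpuhp callbacks and registers the syscore ops,
 * later users only bump kvm_usage_count.  The system_state check below
 * closes the race with a forced reboot that has already run
 * syscore_shutdown().
 */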
5676 static int kvm_enable_virtualization(void)
5677 {
5678 int r;
5679
5680 guard(mutex)(&kvm_usage_lock);
5681
5682 if (kvm_usage_count++)
5683 return 0;
5684
5685 kvm_arch_enable_virtualization();
5686
5687 r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
5688 kvm_online_cpu, kvm_offline_cpu);
5689 if (r)
5690 goto err_cpuhp;
5691
5692 register_syscore_ops(&kvm_syscore_ops);
5693
5694 /*
5695 * Undo virtualization enabling and bail if the system is going down.
5696 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5697 * possible for an in-flight operation to enable virtualization after
5698 * syscore_shutdown() is called, i.e. without kvm_shutdown() being
5699 * invoked. Note, this relies on system_state being set _before_
5700 * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
5701 * or this CPU observes the impending shutdown. Which is why KVM uses
5702 * a syscore ops hook instead of registering a dedicated reboot
5703 * notifier (the latter runs before system_state is updated).
5704 */
5705 if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5706 system_state == SYSTEM_RESTART) {
5707 r = -EBUSY;
5708 goto err_rebooting;
5709 }
5710
5711 return 0;
5712
5713 err_rebooting:
5714 unregister_syscore_ops(&kvm_syscore_ops);
5715 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5716 err_cpuhp:
5717 kvm_arch_disable_virtualization();
5718 --kvm_usage_count;
5719 return r;
5720 }
5721
5722 static void kvm_disable_virtualization(void)
5723 {
5724 guard(mutex)(&kvm_usage_lock);
5725
5726 if (--kvm_usage_count)
5727 return;
5728
5729 unregister_syscore_ops(&kvm_syscore_ops);
5730 cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5731 kvm_arch_disable_virtualization();
5732 }
5733
5734 static int kvm_init_virtualization(void)
5735 {
5736 if (enable_virt_at_load)
5737 return kvm_enable_virtualization();
5738
5739 return 0;
5740 }
5741
5742 static void kvm_uninit_virtualization(void)
5743 {
5744 if (enable_virt_at_load)
5745 kvm_disable_virtualization();
5746 }
5747 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5748 static int kvm_enable_virtualization(void)
5749 {
5750 return 0;
5751 }
5752
5753 static int kvm_init_virtualization(void)
5754 {
5755 return 0;
5756 }
5757
5758 static void kvm_disable_virtualization(void)
5759 {
5760
5761 }
5762
5763 static void kvm_uninit_virtualization(void)
5764 {
5765
5766 }
5767 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5768
5769 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5770 {
5771 if (dev->ops->destructor)
5772 dev->ops->destructor(dev);
5773 }
5774
5775 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5776 {
5777 int i;
5778
5779 for (i = 0; i < bus->dev_count; i++) {
5780 struct kvm_io_device *pos = bus->range[i].dev;
5781
5782 kvm_iodevice_destructor(pos);
5783 }
5784 kfree(bus);
5785 }
5786
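/*
 * Illustrative example of the comparison below: with a device registered at
 * {addr = 0x100, len = 0x10}, a 4-byte access at 0x104 compares equal (it
 * lies entirely inside the registered range), whereas an access starting
 * below 0x100 or ending beyond 0x110 does not.
 */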
5787 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5788 const struct kvm_io_range *r2)
5789 {
5790 gpa_t addr1 = r1->addr;
5791 gpa_t addr2 = r2->addr;
5792
5793 if (addr1 < addr2)
5794 return -1;
5795
5796 /* If r2->len == 0, match the exact address. If r2->len != 0,
5797 * accept any overlapping write. Any order is acceptable for
5798 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5799 * we process all of them.
5800 */
5801 if (r2->len) {
5802 addr1 += r1->len;
5803 addr2 += r2->len;
5804 }
5805
5806 if (addr1 > addr2)
5807 return 1;
5808
5809 return 0;
5810 }
5811
5812 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5813 {
5814 return kvm_io_bus_cmp(p1, p2);
5815 }
5816
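/*
 * Binary-search the sorted range array for any entry that compares equal to
 * the access, then walk backwards so the caller starts at the first such
 * entry and can iterate over all matching devices.
 */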
5817 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5818 gpa_t addr, int len)
5819 {
5820 struct kvm_io_range *range, key;
5821 int off;
5822
5823 key = (struct kvm_io_range) {
5824 .addr = addr,
5825 .len = len,
5826 };
5827
5828 range = bsearch(&key, bus->range, bus->dev_count,
5829 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5830 if (range == NULL)
5831 return -ENOENT;
5832
5833 off = range - bus->range;
5834
5835 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5836 off--;
5837
5838 return off;
5839 }
5840
5841 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5842 struct kvm_io_range *range, const void *val)
5843 {
5844 int idx;
5845
5846 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5847 if (idx < 0)
5848 return -EOPNOTSUPP;
5849
5850 while (idx < bus->dev_count &&
5851 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5852 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5853 range->len, val))
5854 return idx;
5855 idx++;
5856 }
5857
5858 return -EOPNOTSUPP;
5859 }
5860
5861 /* kvm_io_bus_write - called under kvm->slots_lock */
5862 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5863 int len, const void *val)
5864 {
5865 struct kvm_io_bus *bus;
5866 struct kvm_io_range range;
5867 int r;
5868
5869 range = (struct kvm_io_range) {
5870 .addr = addr,
5871 .len = len,
5872 };
5873
5874 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5875 if (!bus)
5876 return -ENOMEM;
5877 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5878 return r < 0 ? r : 0;
5879 }
5880 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5881
5882 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5883 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5884 gpa_t addr, int len, const void *val, long cookie)
5885 {
5886 struct kvm_io_bus *bus;
5887 struct kvm_io_range range;
5888
5889 range = (struct kvm_io_range) {
5890 .addr = addr,
5891 .len = len,
5892 };
5893
5894 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5895 if (!bus)
5896 return -ENOMEM;
5897
5898 /* First try the device referenced by cookie. */
5899 if ((cookie >= 0) && (cookie < bus->dev_count) &&
5900 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5901 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5902 val))
5903 return cookie;
5904
5905 /*
5906 * cookie contained garbage; fall back to search and return the
5907 * correct cookie value.
5908 */
5909 return __kvm_io_bus_write(vcpu, bus, &range, val);
5910 }
5911
5912 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5913 struct kvm_io_range *range, void *val)
5914 {
5915 int idx;
5916
5917 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5918 if (idx < 0)
5919 return -EOPNOTSUPP;
5920
5921 while (idx < bus->dev_count &&
5922 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5923 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5924 range->len, val))
5925 return idx;
5926 idx++;
5927 }
5928
5929 return -EOPNOTSUPP;
5930 }
5931
5932 /* kvm_io_bus_read - called under kvm->slots_lock */
5933 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5934 int len, void *val)
5935 {
5936 struct kvm_io_bus *bus;
5937 struct kvm_io_range range;
5938 int r;
5939
5940 range = (struct kvm_io_range) {
5941 .addr = addr,
5942 .len = len,
5943 };
5944
5945 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5946 if (!bus)
5947 return -ENOMEM;
5948 r = __kvm_io_bus_read(vcpu, bus, &range, val);
5949 return r < 0 ? r : 0;
5950 }
5951
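/*
 * Registration copies the bus into a larger allocation with the new range
 * inserted in sort order, publishes it with rcu_assign_pointer() and only
 * frees the old bus after an expedited SRCU grace period, so readers under
 * kvm->srcu never see a half-updated array.
 */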
5952 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5953 int len, struct kvm_io_device *dev)
5954 {
5955 int i;
5956 struct kvm_io_bus *new_bus, *bus;
5957 struct kvm_io_range range;
5958
5959 lockdep_assert_held(&kvm->slots_lock);
5960
5961 bus = kvm_get_bus(kvm, bus_idx);
5962 if (!bus)
5963 return -ENOMEM;
5964
5965 /* exclude ioeventfd which is limited by maximum fd */
5966 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5967 return -ENOSPC;
5968
5969 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5970 GFP_KERNEL_ACCOUNT);
5971 if (!new_bus)
5972 return -ENOMEM;
5973
5974 range = (struct kvm_io_range) {
5975 .addr = addr,
5976 .len = len,
5977 .dev = dev,
5978 };
5979
5980 for (i = 0; i < bus->dev_count; i++)
5981 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5982 break;
5983
5984 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5985 new_bus->dev_count++;
5986 new_bus->range[i] = range;
5987 memcpy(new_bus->range + i + 1, bus->range + i,
5988 (bus->dev_count - i) * sizeof(struct kvm_io_range));
5989 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5990 synchronize_srcu_expedited(&kvm->srcu);
5991 kfree(bus);
5992
5993 return 0;
5994 }
5995
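/*
 * Unregistration mirrors registration with a shrunken copy of the bus.  If
 * that allocation fails, a NULL bus is published and every remaining device
 * on it is destroyed, which keeps readers safe at the cost of losing the
 * other devices.
 */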
5996 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5997 struct kvm_io_device *dev)
5998 {
5999 int i;
6000 struct kvm_io_bus *new_bus, *bus;
6001
6002 lockdep_assert_held(&kvm->slots_lock);
6003
6004 bus = kvm_get_bus(kvm, bus_idx);
6005 if (!bus)
6006 return 0;
6007
6008 for (i = 0; i < bus->dev_count; i++) {
6009 if (bus->range[i].dev == dev) {
6010 break;
6011 }
6012 }
6013
6014 if (i == bus->dev_count)
6015 return 0;
6016
6017 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
6018 GFP_KERNEL_ACCOUNT);
6019 if (new_bus) {
6020 memcpy(new_bus, bus, struct_size(bus, range, i));
6021 new_bus->dev_count--;
6022 memcpy(new_bus->range + i, bus->range + i + 1,
6023 flex_array_size(new_bus, range, new_bus->dev_count - i));
6024 }
6025
6026 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6027 synchronize_srcu_expedited(&kvm->srcu);
6028
6029 /*
6030 * If NULL bus is installed, destroy the old bus, including all the
6031 * attached devices. Otherwise, destroy the caller's device only.
6032 */
6033 if (!new_bus) {
6034 pr_err("kvm: failed to shrink bus, removing it completely\n");
6035 kvm_io_bus_destroy(bus);
6036 return -ENOMEM;
6037 }
6038
6039 kvm_iodevice_destructor(dev);
6040 kfree(bus);
6041 return 0;
6042 }
6043
6044 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6045 gpa_t addr)
6046 {
6047 struct kvm_io_bus *bus;
6048 int dev_idx, srcu_idx;
6049 struct kvm_io_device *iodev = NULL;
6050
6051 srcu_idx = srcu_read_lock(&kvm->srcu);
6052
6053 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
6054 if (!bus)
6055 goto out_unlock;
6056
6057 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6058 if (dev_idx < 0)
6059 goto out_unlock;
6060
6061 iodev = bus->range[dev_idx].dev;
6062
6063 out_unlock:
6064 srcu_read_unlock(&kvm->srcu, srcu_idx);
6065
6066 return iodev;
6067 }
6068 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
6069
6070 static int kvm_debugfs_open(struct inode *inode, struct file *file,
6071 int (*get)(void *, u64 *), int (*set)(void *, u64),
6072 const char *fmt)
6073 {
6074 int ret;
6075 struct kvm_stat_data *stat_data = inode->i_private;
6076
6077 /*
6078 * The debugfs files are a reference to the kvm struct which
6079 * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
6080 * avoids the race between open and the removal of the debugfs directory.
6081 */
6082 if (!kvm_get_kvm_safe(stat_data->kvm))
6083 return -ENOENT;
6084
6085 ret = simple_attr_open(inode, file, get,
6086 kvm_stats_debugfs_mode(stat_data->desc) & 0222
6087 ? set : NULL, fmt);
6088 if (ret)
6089 kvm_put_kvm(stat_data->kvm);
6090
6091 return ret;
6092 }
6093
6094 static int kvm_debugfs_release(struct inode *inode, struct file *file)
6095 {
6096 struct kvm_stat_data *stat_data = inode->i_private;
6097
6098 simple_attr_release(inode, file);
6099 kvm_put_kvm(stat_data->kvm);
6100
6101 return 0;
6102 }
6103
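/*
 * The per-VM and per-vCPU stat helpers below read or zero a u64 located at a
 * descriptor-provided byte offset into kvm->stat or vcpu->stat; vCPU stats
 * are summed (or cleared) across every vCPU of the VM.
 */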
6104 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6105 {
6106 *val = *(u64 *)((void *)(&kvm->stat) + offset);
6107
6108 return 0;
6109 }
6110
6111 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6112 {
6113 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
6114
6115 return 0;
6116 }
6117
6118 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6119 {
6120 unsigned long i;
6121 struct kvm_vcpu *vcpu;
6122
6123 *val = 0;
6124
6125 kvm_for_each_vcpu(i, vcpu, kvm)
6126 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
6127
6128 return 0;
6129 }
6130
6131 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6132 {
6133 unsigned long i;
6134 struct kvm_vcpu *vcpu;
6135
6136 kvm_for_each_vcpu(i, vcpu, kvm)
6137 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6138
6139 return 0;
6140 }
6141
6142 static int kvm_stat_data_get(void *data, u64 *val)
6143 {
6144 int r = -EFAULT;
6145 struct kvm_stat_data *stat_data = data;
6146
6147 switch (stat_data->kind) {
6148 case KVM_STAT_VM:
6149 r = kvm_get_stat_per_vm(stat_data->kvm,
6150 stat_data->desc->desc.offset, val);
6151 break;
6152 case KVM_STAT_VCPU:
6153 r = kvm_get_stat_per_vcpu(stat_data->kvm,
6154 stat_data->desc->desc.offset, val);
6155 break;
6156 }
6157
6158 return r;
6159 }
6160
6161 static int kvm_stat_data_clear(void *data, u64 val)
6162 {
6163 int r = -EFAULT;
6164 struct kvm_stat_data *stat_data = data;
6165
6166 if (val)
6167 return -EINVAL;
6168
6169 switch (stat_data->kind) {
6170 case KVM_STAT_VM:
6171 r = kvm_clear_stat_per_vm(stat_data->kvm,
6172 stat_data->desc->desc.offset);
6173 break;
6174 case KVM_STAT_VCPU:
6175 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6176 stat_data->desc->desc.offset);
6177 break;
6178 }
6179
6180 return r;
6181 }
6182
6183 static int kvm_stat_data_open(struct inode *inode, struct file *file)
6184 {
6185 __simple_attr_check_format("%llu\n", 0ull);
6186 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6187 kvm_stat_data_clear, "%llu\n");
6188 }
6189
6190 static const struct file_operations stat_fops_per_vm = {
6191 .owner = THIS_MODULE,
6192 .open = kvm_stat_data_open,
6193 .release = kvm_debugfs_release,
6194 .read = simple_attr_read,
6195 .write = simple_attr_write,
6196 };
6197
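/*
 * Global debugfs stats: aggregate the per-VM (or per-vCPU) value for a given
 * offset across every VM on vm_list, holding kvm_lock to keep the list
 * stable while iterating.
 */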
6198 static int vm_stat_get(void *_offset, u64 *val)
6199 {
6200 unsigned offset = (long)_offset;
6201 struct kvm *kvm;
6202 u64 tmp_val;
6203
6204 *val = 0;
6205 mutex_lock(&kvm_lock);
6206 list_for_each_entry(kvm, &vm_list, vm_list) {
6207 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6208 *val += tmp_val;
6209 }
6210 mutex_unlock(&kvm_lock);
6211 return 0;
6212 }
6213
6214 static int vm_stat_clear(void *_offset, u64 val)
6215 {
6216 unsigned offset = (long)_offset;
6217 struct kvm *kvm;
6218
6219 if (val)
6220 return -EINVAL;
6221
6222 mutex_lock(&kvm_lock);
6223 list_for_each_entry(kvm, &vm_list, vm_list) {
6224 kvm_clear_stat_per_vm(kvm, offset);
6225 }
6226 mutex_unlock(&kvm_lock);
6227
6228 return 0;
6229 }
6230
6231 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6232 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6233
6234 static int vcpu_stat_get(void *_offset, u64 *val)
6235 {
6236 unsigned offset = (long)_offset;
6237 struct kvm *kvm;
6238 u64 tmp_val;
6239
6240 *val = 0;
6241 mutex_lock(&kvm_lock);
6242 list_for_each_entry(kvm, &vm_list, vm_list) {
6243 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6244 *val += tmp_val;
6245 }
6246 mutex_unlock(&kvm_lock);
6247 return 0;
6248 }
6249
6250 static int vcpu_stat_clear(void *_offset, u64 val)
6251 {
6252 unsigned offset = (long)_offset;
6253 struct kvm *kvm;
6254
6255 if (val)
6256 return -EINVAL;
6257
6258 mutex_lock(&kvm_lock);
6259 list_for_each_entry(kvm, &vm_list, vm_list) {
6260 kvm_clear_stat_per_vcpu(kvm, offset);
6261 }
6262 mutex_unlock(&kvm_lock);
6263
6264 return 0;
6265 }
6266
6267 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6268 "%llu\n");
6269 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6270
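/*
 * Emit a uevent on the /dev/kvm device for VM creation and destruction,
 * carrying the cumulative and active VM counts, the creating task's PID and,
 * when available, the VM's debugfs stats path.
 */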
6271 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6272 {
6273 struct kobj_uevent_env *env;
6274 unsigned long long created, active;
6275
6276 if (!kvm_dev.this_device || !kvm)
6277 return;
6278
6279 mutex_lock(&kvm_lock);
6280 if (type == KVM_EVENT_CREATE_VM) {
6281 kvm_createvm_count++;
6282 kvm_active_vms++;
6283 } else if (type == KVM_EVENT_DESTROY_VM) {
6284 kvm_active_vms--;
6285 }
6286 created = kvm_createvm_count;
6287 active = kvm_active_vms;
6288 mutex_unlock(&kvm_lock);
6289
6290 env = kzalloc(sizeof(*env), GFP_KERNEL);
6291 if (!env)
6292 return;
6293
6294 add_uevent_var(env, "CREATED=%llu", created);
6295 add_uevent_var(env, "COUNT=%llu", active);
6296
6297 if (type == KVM_EVENT_CREATE_VM) {
6298 add_uevent_var(env, "EVENT=create");
6299 kvm->userspace_pid = task_pid_nr(current);
6300 } else if (type == KVM_EVENT_DESTROY_VM) {
6301 add_uevent_var(env, "EVENT=destroy");
6302 }
6303 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6304
6305 if (!IS_ERR(kvm->debugfs_dentry)) {
6306 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
6307
6308 if (p) {
6309 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6310 if (!IS_ERR(tmp))
6311 add_uevent_var(env, "STATS_PATH=%s", tmp);
6312 kfree(p);
6313 }
6314 }
6315 /* no need for checks, since we are adding at most 5 keys */
6316 env->envp[env->envp_idx++] = NULL;
6317 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6318 kfree(env);
6319 }
6320
6321 static void kvm_init_debug(void)
6322 {
6323 const struct file_operations *fops;
6324 const struct _kvm_stats_desc *pdesc;
6325 int i;
6326
6327 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6328
6329 for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6330 pdesc = &kvm_vm_stats_desc[i];
6331 if (kvm_stats_debugfs_mode(pdesc) & 0222)
6332 fops = &vm_stat_fops;
6333 else
6334 fops = &vm_stat_readonly_fops;
6335 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6336 kvm_debugfs_dir,
6337 (void *)(long)pdesc->desc.offset, fops);
6338 }
6339
6340 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6341 pdesc = &kvm_vcpu_stats_desc[i];
6342 if (kvm_stats_debugfs_mode(pdesc) & 0222)
6343 fops = &vcpu_stat_fops;
6344 else
6345 fops = &vcpu_stat_readonly_fops;
6346 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6347 kvm_debugfs_dir,
6348 (void *)(long)pdesc->desc.offset, fops);
6349 }
6350 }
6351
6352 static inline
6353 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6354 {
6355 return container_of(pn, struct kvm_vcpu, preempt_notifier);
6356 }
6357
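/*
 * Preempt notifier hooks: they load and put the architecture vCPU state as
 * the task owning the vCPU is scheduled in and out, and keep the per-CPU
 * kvm_running_vcpu pointer in sync for kvm_get_running_vcpu().
 */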
6358 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6359 {
6360 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6361
6362 WRITE_ONCE(vcpu->preempted, false);
6363 WRITE_ONCE(vcpu->ready, false);
6364
6365 __this_cpu_write(kvm_running_vcpu, vcpu);
6366 kvm_arch_vcpu_load(vcpu, cpu);
6367
6368 WRITE_ONCE(vcpu->scheduled_out, false);
6369 }
6370
6371 static void kvm_sched_out(struct preempt_notifier *pn,
6372 struct task_struct *next)
6373 {
6374 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6375
6376 WRITE_ONCE(vcpu->scheduled_out, true);
6377
6378 if (task_is_runnable(current) && vcpu->wants_to_run) {
6379 WRITE_ONCE(vcpu->preempted, true);
6380 WRITE_ONCE(vcpu->ready, true);
6381 }
6382 kvm_arch_vcpu_put(vcpu);
6383 __this_cpu_write(kvm_running_vcpu, NULL);
6384 }
6385
6386 /**
6387 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6388 *
6389 * We can disable preemption locally around accessing the per-CPU variable,
6390 * and use the resolved vcpu pointer after enabling preemption again,
6391 * because even if the current thread is migrated to another CPU, reading
6392 * the per-CPU value later will give us the same value as we update the
6393 * per-CPU variable in the preempt notifier handlers.
6394 */
6395 struct kvm_vcpu *kvm_get_running_vcpu(void)
6396 {
6397 struct kvm_vcpu *vcpu;
6398
6399 preempt_disable();
6400 vcpu = __this_cpu_read(kvm_running_vcpu);
6401 preempt_enable();
6402
6403 return vcpu;
6404 }
6405 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
6406
6407 /**
6408 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6409 */
6410 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6411 {
6412 return &kvm_running_vcpu;
6413 }
6414
6415 #ifdef CONFIG_GUEST_PERF_EVENTS
6416 static unsigned int kvm_guest_state(void)
6417 {
6418 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6419 unsigned int state;
6420
6421 if (!kvm_arch_pmi_in_guest(vcpu))
6422 return 0;
6423
6424 state = PERF_GUEST_ACTIVE;
6425 if (!kvm_arch_vcpu_in_kernel(vcpu))
6426 state |= PERF_GUEST_USER;
6427
6428 return state;
6429 }
6430
6431 static unsigned long kvm_guest_get_ip(void)
6432 {
6433 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6434
6435 /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6436 if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6437 return 0;
6438
6439 return kvm_arch_vcpu_get_ip(vcpu);
6440 }
6441
6442 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6443 .state = kvm_guest_state,
6444 .get_ip = kvm_guest_get_ip,
6445 .handle_intel_pt_intr = NULL,
6446 };
6447
6448 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
6449 {
6450 kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6451 perf_register_guest_info_callbacks(&kvm_guest_cbs);
6452 }
6453 void kvm_unregister_perf_callbacks(void)
6454 {
6455 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6456 }
6457 #endif
6458
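/*
 * Module-wide initialization: set up the vcpu slab cache and per-CPU kick
 * masks, initialize irqfd, async #PF, debugfs, the VFIO device ops and
 * guest_memfd support, optionally enable virtualization, and only then
 * register /dev/kvm so userspace cannot race with a half-initialized module.
 */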
6459 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6460 {
6461 int r;
6462 int cpu;
6463
6464 /* A kmem cache lets us meet the alignment requirements of fx_save. */
6465 if (!vcpu_align)
6466 vcpu_align = __alignof__(struct kvm_vcpu);
6467 kvm_vcpu_cache =
6468 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6469 SLAB_ACCOUNT,
6470 offsetof(struct kvm_vcpu, arch),
6471 offsetofend(struct kvm_vcpu, stats_id)
6472 - offsetof(struct kvm_vcpu, arch),
6473 NULL);
6474 if (!kvm_vcpu_cache)
6475 return -ENOMEM;
6476
6477 for_each_possible_cpu(cpu) {
6478 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6479 GFP_KERNEL, cpu_to_node(cpu))) {
6480 r = -ENOMEM;
6481 goto err_cpu_kick_mask;
6482 }
6483 }
6484
6485 r = kvm_irqfd_init();
6486 if (r)
6487 goto err_irqfd;
6488
6489 r = kvm_async_pf_init();
6490 if (r)
6491 goto err_async_pf;
6492
6493 kvm_chardev_ops.owner = module;
6494 kvm_vm_fops.owner = module;
6495 kvm_vcpu_fops.owner = module;
6496 kvm_device_fops.owner = module;
6497
6498 kvm_preempt_ops.sched_in = kvm_sched_in;
6499 kvm_preempt_ops.sched_out = kvm_sched_out;
6500
6501 kvm_init_debug();
6502
6503 r = kvm_vfio_ops_init();
6504 if (WARN_ON_ONCE(r))
6505 goto err_vfio;
6506
6507 kvm_gmem_init(module);
6508
6509 r = kvm_init_virtualization();
6510 if (r)
6511 goto err_virt;
6512
6513 /*
6514 * Registration _must_ be the very last thing done, as this exposes
6515 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
6516 */
6517 r = misc_register(&kvm_dev);
6518 if (r) {
6519 pr_err("kvm: misc device register failed\n");
6520 goto err_register;
6521 }
6522
6523 return 0;
6524
6525 err_register:
6526 kvm_uninit_virtualization();
6527 err_virt:
6528 kvm_vfio_ops_exit();
6529 err_vfio:
6530 kvm_async_pf_deinit();
6531 err_async_pf:
6532 kvm_irqfd_exit();
6533 err_irqfd:
6534 err_cpu_kick_mask:
6535 for_each_possible_cpu(cpu)
6536 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6537 kmem_cache_destroy(kvm_vcpu_cache);
6538 return r;
6539 }
6540 EXPORT_SYMBOL_GPL(kvm_init);
6541
6542 void kvm_exit(void)
6543 {
6544 int cpu;
6545
6546 /*
6547 * Note, unregistering /dev/kvm doesn't strictly need to come first,
6548 * as fops_get(), a.k.a. try_module_get(), prevents acquiring references
6549 * to KVM while the module is being stopped.
6550 */
6551 misc_deregister(&kvm_dev);
6552
6553 kvm_uninit_virtualization();
6554
6555 debugfs_remove_recursive(kvm_debugfs_dir);
6556 for_each_possible_cpu(cpu)
6557 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6558 kmem_cache_destroy(kvm_vcpu_cache);
6559 kvm_vfio_ops_exit();
6560 kvm_async_pf_deinit();
6561 kvm_irqfd_exit();
6562 }
6563 EXPORT_SYMBOL_GPL(kvm_exit);
6564
6565 struct kvm_vm_worker_thread_context {
6566 struct kvm *kvm;
6567 struct task_struct *parent;
6568 struct completion init_done;
6569 kvm_vm_thread_fn_t thread_fn;
6570 uintptr_t data;
6571 int err;
6572 };
6573
6574 static int kvm_vm_worker_thread(void *context)
6575 {
6576 /*
6577 * The init_context is allocated on the stack of the parent thread, so
6578 * we have to locally copy anything that is needed beyond initialization
6579 */
6580 struct kvm_vm_worker_thread_context *init_context = context;
6581 struct task_struct *parent;
6582 struct kvm *kvm = init_context->kvm;
6583 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
6584 uintptr_t data = init_context->data;
6585 int err;
6586
6587 err = kthread_park(current);
6588 /* kthread_park(current) is never supposed to return an error */
6589 WARN_ON(err != 0);
6590 if (err)
6591 goto init_complete;
6592
6593 err = cgroup_attach_task_all(init_context->parent, current);
6594 if (err) {
6595 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
6596 __func__, err);
6597 goto init_complete;
6598 }
6599
6600 set_user_nice(current, task_nice(init_context->parent));
6601
6602 init_complete:
6603 init_context->err = err;
6604 complete(&init_context->init_done);
6605 init_context = NULL;
6606
6607 if (err)
6608 goto out;
6609
6610 /* Wait to be woken up by the spawner before proceeding. */
6611 kthread_parkme();
6612
6613 if (!kthread_should_stop())
6614 err = thread_fn(kvm, data);
6615
6616 out:
6617 /*
6618 * Move kthread back to its original cgroup to prevent it lingering in
6619 * the cgroup of the VM process, after the latter finishes its
6620 * execution.
6621 *
6622 * kthread_stop() waits on the 'exited' completion condition which is
6623 * set in exit_mm(), via mm_release(), in do_exit(). However, the
6624 * kthread is removed from the cgroup in the cgroup_exit() which is
6625 * called after the exit_mm(). This causes the kthread_stop() to return
6626 * before the kthread actually quits the cgroup.
6627 */
6628 rcu_read_lock();
6629 parent = rcu_dereference(current->real_parent);
6630 get_task_struct(parent);
6631 rcu_read_unlock();
6632 cgroup_attach_task_all(parent, current);
6633 put_task_struct(parent);
6634
6635 return err;
6636 }
6637
6638 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6639 uintptr_t data, const char *name,
6640 struct task_struct **thread_ptr)
6641 {
6642 struct kvm_vm_worker_thread_context init_context = {};
6643 struct task_struct *thread;
6644
6645 *thread_ptr = NULL;
6646 init_context.kvm = kvm;
6647 init_context.parent = current;
6648 init_context.thread_fn = thread_fn;
6649 init_context.data = data;
6650 init_completion(&init_context.init_done);
6651
6652 thread = kthread_run(kvm_vm_worker_thread, &init_context,
6653 "%s-%d", name, task_pid_nr(current));
6654 if (IS_ERR(thread))
6655 return PTR_ERR(thread);
6656
6657 /* kthread_run is never supposed to return NULL */
6658 WARN_ON(thread == NULL);
6659
6660 wait_for_completion(&init_context.init_done);
6661
6662 if (!init_context.err)
6663 *thread_ptr = thread;
6664
6665 return init_context.err;
6666 }
6667