xref: /linux/virt/kvm/kvm_main.c (revision 949d0a46ad1b9ab3450fb6ed69ff1e3e13c657bd)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine (KVM) Hypervisor
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
#include <linux/rseq.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>


/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);

/* Default halves per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
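
/*
 * Illustrative sketch (not compiled): how the four parameters above fit
 * together.  The real grow/shrink helpers live further down in this file;
 * this hypothetical helper only demonstrates the intended policy.
 *
 *	static unsigned int example_grow_poll_ns(unsigned int cur)
 *	{
 *		unsigned int val = cur * halt_poll_ns_grow;	// default: double
 *
 *		if (val < halt_poll_ns_grow_start)
 *			val = halt_poll_ns_grow_start;	// start growing from 10us
 *		return val;	// callers cap the result, with halt_poll_ns as
 *				// the default ceiling
 *	}
 *
 * Shrinking divides by halt_poll_ns_shrink (default 2); in the real helper
 * a shrink value of 0 disables polling entirely by resetting it to 0.
 */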

/*
 * Allow direct access (from KVM or the CPU) without MMU notifier protection
 * to unpinned pages.
 */
static bool allow_unsafe_mappings;
module_param(allow_unsafe_mappings, bool, 0444);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

static struct dentry *kvm_debugfs_dir;

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the fd was opened by a 64-bit task and then passed to a compat
 *   task, make the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
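
/*
 * Illustrative sketch (not compiled): KVM_COMPAT() is expanded inside the
 * file_operations initializers later in this file, roughly like so (the
 * fops name here is made up):
 *
 *	static const struct file_operations example_vcpu_fops = {
 *		.unlocked_ioctl	= kvm_vcpu_ioctl,
 *		KVM_COMPAT(kvm_vcpu_compat_ioctl),
 *	};
 *
 * With CONFIG_KVM_COMPAT this wires up .compat_ioctl directly; without it,
 * the fallback above both fails compat ioctls and installs an open handler
 * that rejects compat tasks outright.
 */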

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_put);
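
/*
 * Illustrative sketch (not compiled): vcpu_load()/vcpu_put() bracket any
 * section that operates on loaded vCPU state, typically a vCPU ioctl (the
 * callee name below is hypothetical):
 *
 *	vcpu_load(vcpu);
 *	r = access_loaded_vcpu_state(vcpu);
 *	vcpu_put(vcpu);
 *
 * vcpu_load() publishes the vCPU in kvm_running_vcpu and registers the
 * preempt notifier, so arch state is saved/restored across preemption
 * until the matching vcpu_put().
 */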

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{
	int cpu;

	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized. See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_make_vcpu_request(vcpu, req, cpus, me);

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_make_all_cpus_request);
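
/*
 * Illustrative sketch (not compiled): a typical caller that needs every
 * vCPU to notice a VM-wide change, exactly as kvm_flush_remote_tlbs()
 * below does:
 *
 *	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 *		++kvm->stat.generic.remote_tlb_flush;
 *
 * For requests that carry KVM_REQUEST_WAIT, the kick IPI is synchronous,
 * so the call only returns once the targeted pCPUs have acked it.
 */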

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 * and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlbs(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_remote_tlbs);

void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
		return;

	/*
	 * Fall back to flushing the entire TLB if the architecture's
	 * range-based TLB invalidation is unsupported or can't be performed
	 * for whatever reason.
	 */
	kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock. The interaction between the various operations on a
	 * memslot must be serialized by slots_lock to ensure the TLB flush
	 * from one operation is observed by any other operation on the same
	 * memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}

static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	void *page;

	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);

	page = (void *)__get_free_page(gfp_flags);
	if (page && mc->init_value)
		memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
	return page;
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;

	if (unlikely(!mc->objects)) {
		if (WARN_ON_ONCE(!capacity))
			return -EIO;

		/*
		 * Custom init values can be used only for page allocations,
		 * and obviously conflict with __GFP_ZERO.
		 */
		if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
			return -EIO;

		mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
		if (!mc->objects)
			return -ENOMEM;

		mc->capacity = capacity;
	}

	/* It is illegal to request a different capacity across topups. */
	if (WARN_ON_ONCE(mc->capacity != capacity))
		return -EIO;

	while (mc->nobjs < mc->capacity) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}

	kvfree(mc->objects);

	mc->objects = NULL;
	mc->capacity = 0;
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
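
/*
 * Illustrative sketch (not compiled): the intended usage pattern for the
 * memory caches above.  Topup runs in a sleepable context so that the
 * actual allocation can later happen under mmu_lock without resorting to
 * GFP_ATOMIC (the cache field and min count below are hypothetical):
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.example_cache, min);
 *	if (r)
 *		return r;
 *	...
 *	KVM_MMU_LOCK(kvm);
 *	new_spte_page = kvm_mmu_memory_cache_alloc(&vcpu->arch.example_cache);
 *	KVM_MMU_UNLOCK(kvm);
 */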

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rwlock_init(&vcpu->pid_lock);
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = NULL;

	/* Fill the stats id string for the vcpu */
	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
		 task_pid_nr(current), id);
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_destroy(vcpu);
	kvm_dirty_ring_free(&vcpu->dirty_ring);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(vcpu->pid);

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
		xa_erase(&kvm->vcpu_array, i);

		/*
		 * Assert that the vCPU isn't visible in any way, to ensure KVM
		 * doesn't trigger a use-after-free if destroying vCPUs results
		 * in a VM-wide request, e.g. to flush remote TLBs when tearing
		 * down MMUs, or to mark the VM dead if a KVM_BUG_ON() fires.
		 */
		WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i));
	}

	atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);

static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm);

struct kvm_mmu_notifier_range {
	/*
	 * 64-bit addresses, as KVM notifiers can operate on host virtual
	 * addresses (unsigned long) and guest physical addresses (64-bit).
	 */
	u64 start;
	u64 end;
	union kvm_mmu_notifier_arg arg;
	gfn_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
	bool lockless;
};

/*
 * The inner-most helper returns a tuple containing the return value from the
 * arch- and action-specific handler, plus a flag indicating whether or not at
 * least one memslot was found, i.e. if the handler found guest memory.
 *
 * Note, most notifiers are averse to booleans, so even though KVM tracks the
 * return from arch code as a bool, outer helpers will cast it to an int. :-(
 */
typedef struct kvm_mmu_notifier_return {
	bool ret;
	bool found_memslot;
} kvm_mn_ret_t;

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))

static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
							 const struct kvm_mmu_notifier_range *range)
{
	struct kvm_mmu_notifier_return r = {
		.ret = false,
		.found_memslot = false,
	};
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return r;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return r;

	/* on_lock will never be called for lockless walks */
	if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
		return r;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
			hva_end = min_t(unsigned long, range->end,
					slot->userspace_addr + (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.arg = range->arg;
			gfn_range.may_block = range->may_block;
			/*
			 * HVA-based notifications aren't relevant to private
			 * mappings as they don't have a userspace mapping.
			 */
			gfn_range.attr_filter = KVM_FILTER_SHARED;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;
			gfn_range.lockless = range->lockless;

			if (!r.found_memslot) {
				r.found_memslot = true;
				if (!range->lockless) {
					KVM_MMU_LOCK(kvm);
					if (!IS_KVM_NULL_FN(range->on_lock))
						range->on_lock(kvm);

					if (IS_KVM_NULL_FN(range->handler))
						goto mmu_unlock;
				}
			}
			r.ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && r.ret)
		kvm_flush_remote_tlbs(kvm);

mmu_unlock:
	if (r.found_memslot && !range->lockless)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						gfn_handler_t handler,
						bool flush_on_ret)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= flush_on_ret,
		.may_block	= false,
		.lockless	= IS_ENABLED(CONFIG_KVM_MMU_LOCKLESS_AGING),
	};

	return kvm_handle_hva_range(kvm, &range).ret;
}

static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
						      unsigned long start,
						      unsigned long end,
						      gfn_handler_t handler)
{
	return kvm_age_hva_range(mn, start, end, handler, false);
}

void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
	lockdep_assert_held_write(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time, as no
	 * spte can be established without taking the mmu_lock, and the
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_invalidate_in_progress++;

	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
		kvm->mmu_invalidate_range_start = INVALID_GPA;
		kvm->mmu_invalidate_range_end = INVALID_GPA;
	}
}

void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);

	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns. Keep things simple and just find the minimal range
		 * which includes the current and new ranges. As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}
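
/*
 * Worked example of the merging above: if one invalidation adds GFN range
 * [0x1000, 0x1004) and a concurrent one adds [0x2000, 0x2002), the tracked
 * window becomes [0x1000, 0x2002) and stays that wide until both
 * invalidations complete, even though [0x1004, 0x2000) was never itself
 * invalidated.
 */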

bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
	return kvm_unmap_gfn_range(kvm, range);
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= kvm_mmu_unmap_gfn_range,
		.on_lock	= kvm_mmu_invalidate_begin,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
	 * each cache's lock.  There are relatively few caches in existence at
	 * any given time, and the caches themselves can check for hva overlap,
	 * i.e. don't need to rely on memslot overlap checks for performance.
	 * Because this runs without holding mmu_lock, the pfn caches must use
	 * mn_active_invalidate_count (see above) instead of
	 * mmu_invalidate_in_progress.
	 */
	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);

	/*
	 * If one or more memslots were found and thus zapped, notify arch code
	 * that guest memory has been reclaimed.  This needs to be done *after*
	 * dropping mmu_lock, as x86's reclaim path is slooooow.
	 */
	if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
		kvm_arch_guest_memory_reclaimed(kvm);

	return 0;
}

void kvm_mmu_invalidate_end(struct kvm *kvm)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * This sequence increase notifies the KVM page fault handler that
	 * the page about to be mapped into an SPTE may have been freed.
	 */
	kvm->mmu_invalidate_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
	 */
	kvm->mmu_invalidate_in_progress--;
	KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);

	/*
	 * Assert that at least one range was added between start() and end().
	 * Not adding a range isn't fatal, but it is a KVM bug.
	 */
	WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
}
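
/*
 * Illustrative sketch (not compiled): how a page fault path consumes the
 * begin/end bookkeeping above.  The fault handler snapshots the sequence
 * count, resolves the pfn outside mmu_lock, and retries if an invalidation
 * ran in the meantime.  mmu_invalidate_retry() is the real consumer-side
 * helper; the surrounding code here is hypothetical:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = example_resolve_pfn(...);		// outside mmu_lock
 *
 *	KVM_MMU_LOCK(kvm);
 *	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 *		KVM_MMU_UNLOCK(kvm);
 *		goto retry;	// an invalidation raced with the lookup
 *	}
 *	...install the mapping...
 *	KVM_MMU_UNLOCK(kvm);
 */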

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_mmu_notifier_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_mmu_invalidate_end,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
		--kvm->mn_active_invalidate_count;
	wake = !kvm->mn_active_invalidate_count;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
				 !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush the TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is no
	 * EPT Access bit to clear, so the EPT tables have to be torn down
	 * instead. If we find this unacceptable, we can always add a
	 * parameter to kvm_age_hva so that it effectively doesn't do
	 * anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_age_hva_range_no_flush(mn, address, address + 1,
					  kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	vfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	if (slot->flags & KVM_MEM_GUEST_MEMFD)
		kvm_gmem_unbind(slot);

	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct hlist_node *idnode;
	struct kvm_memory_slot *memslot;
	int bkt;

	/*
	 * The same memslot objects live in both active and inactive sets,
	 * arbitrarily free using index '1' so the second invocation of this
	 * function isn't operating over a structure with dangling pointers
	 * (even though this function isn't actually touching them).
	 */
	if (!slots->node_idx)
		return;

	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
		kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}


static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (IS_ERR(kvm->debugfs_dentry))
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
	static DEFINE_MUTEX(kvm_debugfs_lock);
	struct dentry *dent;
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i, ret = -ENOMEM;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
	mutex_lock(&kvm_debugfs_lock);
	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
	if (dent) {
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
		dput(dent);
		mutex_unlock(&kvm_debugfs_lock);
		return 0;
	}
	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	mutex_unlock(&kvm_debugfs_lock);
	if (IS_ERR(dent))
		return 0;

	kvm->debugfs_dentry = dent;
	kvm->debugfs_stat_data = kzalloc_objs(*kvm->debugfs_stat_data,
					      kvm_debugfs_num_entries,
					      GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		goto out_err;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc_obj(*stat_data, GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc_obj(*stat_data, GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			goto out_err;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	kvm_arch_create_vm_debugfs(kvm);
	return 0;
out_err:
	kvm_destroy_vm_debugfs(kvm);
	return ret;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs directory has been created.  By then
 * kvm->debugfs_dentry is set up, so arch code can create arch-specific
 * debugfs entries under it.  Cleanup happens automatically and recursively
 * in kvm_destroy_vm_debugfs(), so no per-arch destroy interface is needed.
 */
void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
}

/* Called only on cleanup and destruction paths when there are no users. */
static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
							     enum kvm_bus idx)
{
	return rcu_dereference_protected(kvm->buses[idx],
					 !refcount_read(&kvm->users_count));
}

static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	struct kvm_memslots *slots;
	int r, i, j;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
	xa_init(&kvm->vcpu_array);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	xa_init(&kvm->mem_attr_array);
#endif

	INIT_LIST_HEAD(&kvm->gpc_list);
	spin_lock_init(&kvm->gpc_lock);

	INIT_LIST_HEAD(&kvm->devices);
	kvm->max_vcpus = KVM_MAX_VCPUS;

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	/*
	 * Force subsequent debugfs file creations to fail if the VM directory
	 * is not created (by kvm_create_vm_debugfs()).
	 */
	kvm->debugfs_dentry = ERR_PTR(-ENOENT);

	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
		 task_pid_nr(current));

	r = -ENOMEM;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	r = kvm_init_irq_routing(kvm);
	if (r)
		goto out_err_no_irq_routing;

	refcount_set(&kvm->users_count, 1);

	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		for (j = 0; j < 2; j++) {
			slots = &kvm->__memslots[i][j];

			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
			slots->hva_tree = RB_ROOT_CACHED;
			slots->gfn_tree = RB_ROOT;
			hash_init(slots->id_hash);
			slots->node_idx = j;

			/* Generations must be different for each address space. */
			slots->generation = i;
		}

		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
	}

	r = -ENOMEM;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc_obj(struct kvm_io_bus, GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = kvm_enable_virtualization();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0)
		goto out_no_coalesced_mmio;

	r = kvm_create_vm_debugfs(kvm, fdname);
	if (r)
		goto out_err_no_debugfs;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err_no_debugfs:
	kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
out_err_no_mmu_notifier:
	kvm_disable_virtualization();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus_for_destruction(kvm, i));
	kvm_free_irq_routing(kvm);
out_err_no_irq_routing:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 *
	 * The device list is generally managed as an rculist, but list_del()
	 * is used intentionally here. If a bug in KVM introduced a reader that
	 * was not backed by a reference on the kvm struct, the hope is that
	 * it'd consume the poisoned forward pointer instead of suffering a
	 * use-after-free, even though this cannot be guaranteed.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in kvm_swap_active_memslots() as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 *
	 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
	 * notifier between a start() and end(), then there shouldn't be any
	 * in-progress invalidations.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	if (kvm->mn_active_invalidate_count)
		kvm->mn_active_invalidate_count = 0;
	else
		WARN_ON(kvm->mmu_invalidate_in_progress);
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	srcu_barrier(&kvm->srcu);
	cleanup_srcu_struct(&kvm->srcu);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	xa_destroy(&kvm->mem_attr_array);
#endif
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	kvm_disable_virtualization();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * A safe version of kvm_get_kvm() that makes sure the VM is not in the
 * middle of destruction.  Returns true if a reference was successfully
 * taken, false otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
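
/*
 * Illustrative sketch (not compiled): a caller that reaches a VM without
 * holding one of its file descriptors (e.g. a stats reader) must use the
 * safe variant and drop the reference when done:
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return -ENOENT;	// the VM is already being destroyed
 *	...use kvm...
 *	kvm_put_kvm(kvm);
 */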

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

int kvm_trylock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
			goto out_unlock;
	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_trylock_all_vcpus);

int kvm_lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	int r;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
		if (r)
			goto out_unlock;
	}
	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return r;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lock_all_vcpus);

void kvm_unlock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		mutex_unlock(&vcpu->mutex);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_unlock_all_vcpus);
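
/*
 * Illustrative sketch (not compiled): the expected calling convention for
 * the helpers above.  kvm->lock must be held and acts as the nest lock for
 * every vcpu->mutex:
 *
 *	mutex_lock(&kvm->lock);
 *	r = kvm_lock_all_vcpus(kvm);
 *	if (!r) {
 *		...mutate VM state that must not race with running vCPUs...
 *		kvm_unlock_all_vcpus(kvm);
 *	}
 *	mutex_unlock(&kvm->lock);
 */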

/*
 * The allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
	int node_idx_inactive = active->node_idx ^ 1;

	return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
				  struct kvm_memory_slot *b)
{
	if (WARN_ON_ONCE(!a && !b))
		return 0;

	if (!a)
		return b->as_id;
	if (!b)
		return a->as_id;

	WARN_ON_ONCE(a->as_id != b->as_id);
	return a->as_id;
}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
				struct kvm_memory_slot *slot)
{
	struct rb_root *gfn_tree = &slots->gfn_tree;
	struct rb_node **node, *parent;
	int idx = slots->node_idx;

	parent = NULL;
	for (node = &gfn_tree->rb_node; *node; ) {
		struct kvm_memory_slot *tmp;

		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
		parent = *node;
		if (slot->base_gfn < tmp->base_gfn)
			node = &(*node)->rb_left;
		else if (slot->base_gfn > tmp->base_gfn)
			node = &(*node)->rb_right;
		else
			BUG();
	}

	rb_link_node(&slot->gfn_node[idx], parent, node);
	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
			       struct kvm_memory_slot *slot)
{
	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{
	int idx = slots->node_idx;

	WARN_ON_ONCE(old->base_gfn != new->base_gfn);

	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
			&slots->gfn_tree);
}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{
	int as_id = kvm_memslots_get_as_id(old, new);
	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
	int idx = slots->node_idx;

	if (old) {
		hash_del(&old->id_node[idx]);
		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);

		if ((long)old == atomic_long_read(&slots->last_used_slot))
			atomic_long_set(&slots->last_used_slot, (long)new);

		if (!new) {
			kvm_erase_gfn_node(slots, old);
			return;
		}
	}

	/*
	 * Initialize @new's hva range.  Do this even when replacing an @old
	 * slot, as kvm_copy_memslot() deliberately does not touch node data.
	 */
	new->hva_node[idx].start = new->userspace_addr;
	new->hva_node[idx].last = new->userspace_addr +
				  (new->npages << PAGE_SHIFT) - 1;

	/*
	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
	 * so hva_node needs to be swapped with remove+insert even though the
	 * hva can't change when replacing an existing slot.
	 */
	hash_add(slots->id_hash, &new->id_node[idx], new->id);
	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

	/*
	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
	 * switch the node in the gfn tree instead of removing the old and
	 * inserting the new as two separate operations. Replacement is a
	 * single O(1) operation versus two O(log(n)) operations for
	 * remove+insert.
	 */
	if (old && old->base_gfn == new->base_gfn) {
		kvm_replace_gfn_node(slots, old, new);
	} else {
		if (old)
			kvm_erase_gfn_node(slots, old);
		kvm_insert_gfn_node(slots, new);
	}
}

/*
 * Flags that do not access any of the extra space of struct
 * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
 * only allows these.
 */
#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)

static int check_memory_region_flags(struct kvm *kvm,
				     const struct kvm_userspace_memory_region2 *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

	if (IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
		valid_flags |= KVM_MEM_GUEST_MEMFD;

	/* Dirty logging of private memory is not currently supported. */
	if (mem->flags & KVM_MEM_GUEST_MEMFD)
		valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	/*
	 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
	 * read-only memslots have emulated MMIO, not page fault, semantics,
	 * and KVM doesn't allow emulated MMIO for private memory.
	 */
	if (kvm_arch_has_readonly_mem(kvm) &&
	    !(mem->flags & KVM_MEM_GUEST_MEMFD))
		valid_flags |= KVM_MEM_READONLY;

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}
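
/*
 * Illustrative examples (not compiled) of the policy enforced above:
 *
 *	KVM_MEM_LOG_DIRTY_PAGES                        -> accepted
 *	KVM_MEM_GUEST_MEMFD | KVM_MEM_LOG_DIRTY_PAGES  -> -EINVAL (no dirty
 *							  logging of private
 *							  memory yet)
 *	KVM_MEM_GUEST_MEMFD | KVM_MEM_READONLY         -> -EINVAL (no emulated
 *							  MMIO for private
 *							  memory)
 */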
1606 
kvm_swap_active_memslots(struct kvm * kvm,int as_id)1607 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1608 {
1609 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1610 
1611 	/* Grab the generation from the activate memslots. */
1612 	u64 gen = __kvm_memslots(kvm, as_id)->generation;
1613 
1614 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1615 	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1616 
1617 	/*
1618 	 * Do not store the new memslots while there are invalidations in
1619 	 * progress, otherwise the locking in invalidate_range_start and
1620 	 * invalidate_range_end will be unbalanced.
1621 	 */
1622 	spin_lock(&kvm->mn_invalidate_lock);
1623 	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1624 	while (kvm->mn_active_invalidate_count) {
1625 		set_current_state(TASK_UNINTERRUPTIBLE);
1626 		spin_unlock(&kvm->mn_invalidate_lock);
1627 		schedule();
1628 		spin_lock(&kvm->mn_invalidate_lock);
1629 	}
1630 	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1631 	rcu_assign_pointer(kvm->memslots[as_id], slots);
1632 	spin_unlock(&kvm->mn_invalidate_lock);
1633 
1634 	/*
1635 	 * Acquired in kvm_set_memslot().  Must be released before the SRCU
1636 	 * synchronization below in order to avoid deadlock with another thread
1637 	 * acquiring slots_arch_lock in an SRCU read-side critical section.
1638 	 */
1639 	mutex_unlock(&kvm->slots_arch_lock);
1640 
1641 	synchronize_srcu_expedited(&kvm->srcu);
1642 
1643 	/*
1644 	 * Increment the new memslot generation a second time, dropping the
1645 	 * update in-progress flag and incrementing the generation based on
1646 	 * the number of address spaces.  This provides a unique and easily
1647 	 * identifiable generation number while the memslots are in flux.
1648 	 */
1649 	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1650 
1651 	/*
1652 	 * Generations must be unique even across address spaces.  We do not need
1653 	 * a global counter for that, instead the generation space is evenly split
1654 	 * across address spaces.  For example, with two address spaces, address
1655 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1656 	 * use generations 1, 3, 5, ...
1657 	 */
1658 	gen += kvm_arch_nr_memslot_as_ids(kvm);
1659 
1660 	kvm_arch_memslots_updated(kvm, gen);
1661 
1662 	slots->generation = gen;
1663 }
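/*
 * Editor's note: an illustrative sketch (not kernel code) of the generation
 * arithmetic above.  The helper name is hypothetical.  With two address
 * spaces, address space 0 cycles through even generations (0, 2, 4, ...) and
 * address space 1 through odd ones (1, 3, 5, ...).
 */
#if 0	/* illustrative only */
static u64 example_next_generation(u64 swapped_gen, int nr_as_ids)
{
	/*
	 * Drop the update-in-progress flag that was set while the swap was
	 * underway, then stride by the number of address spaces so that
	 * generations stay unique across all of them.
	 */
	return (swapped_gen & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS) + nr_as_ids;
}
#endif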
1664 
1665 static int kvm_prepare_memory_region(struct kvm *kvm,
1666 				     const struct kvm_memory_slot *old,
1667 				     struct kvm_memory_slot *new,
1668 				     enum kvm_mr_change change)
1669 {
1670 	int r;
1671 
1672 	/*
1673 	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1674 	 * will be freed on "commit".  If logging is enabled in both old and
1675 	 * new, reuse the existing bitmap.  If logging is enabled only in the
1676 	 * new and KVM isn't using a ring buffer, allocate and initialize a
1677 	 * new bitmap.
1678 	 */
1679 	if (change != KVM_MR_DELETE) {
1680 		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1681 			new->dirty_bitmap = NULL;
1682 		else if (old && old->dirty_bitmap)
1683 			new->dirty_bitmap = old->dirty_bitmap;
1684 		else if (kvm_use_dirty_bitmap(kvm)) {
1685 			r = kvm_alloc_dirty_bitmap(new);
1686 			if (r)
1687 				return r;
1688 
1689 			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1690 				bitmap_set(new->dirty_bitmap, 0, new->npages);
1691 		}
1692 	}
1693 
1694 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1695 
1696 	/* Free the bitmap on failure if it was allocated above. */
1697 	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1698 		kvm_destroy_dirty_bitmap(new);
1699 
1700 	return r;
1701 }
1702 
1703 static void kvm_commit_memory_region(struct kvm *kvm,
1704 				     struct kvm_memory_slot *old,
1705 				     const struct kvm_memory_slot *new,
1706 				     enum kvm_mr_change change)
1707 {
1708 	int old_flags = old ? old->flags : 0;
1709 	int new_flags = new ? new->flags : 0;
1710 	/*
1711 	 * Update the total number of memslot pages before calling the arch
1712 	 * hook so that architectures can consume the result directly.
1713 	 */
1714 	if (change == KVM_MR_DELETE)
1715 		kvm->nr_memslot_pages -= old->npages;
1716 	else if (change == KVM_MR_CREATE)
1717 		kvm->nr_memslot_pages += new->npages;
1718 
1719 	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1720 		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1721 		atomic_set(&kvm->nr_memslots_dirty_logging,
1722 			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1723 	}
1724 
1725 	kvm_arch_commit_memory_region(kvm, old, new, change);
1726 
1727 	switch (change) {
1728 	case KVM_MR_CREATE:
1729 		/* Nothing more to do. */
1730 		break;
1731 	case KVM_MR_DELETE:
1732 		/* Free the old memslot and all its metadata. */
1733 		kvm_free_memslot(kvm, old);
1734 		break;
1735 	case KVM_MR_MOVE:
1736 		/*
1737 		 * Moving a guest_memfd memslot isn't supported, and will never
1738 		 * be supported.
1739 		 */
1740 		WARN_ON_ONCE(old->flags & KVM_MEM_GUEST_MEMFD);
1741 		fallthrough;
1742 	case KVM_MR_FLAGS_ONLY:
1743 		/*
1744 		 * Free the dirty bitmap as needed; the check below encompasses
1745 		 * both the flags and whether a ring buffer is being used.
1746 		 */
1747 		if (old->dirty_bitmap && !new->dirty_bitmap)
1748 			kvm_destroy_dirty_bitmap(old);
1749 
1750 		/*
1751 		 * Unbind the guest_memfd instance as needed; the @new slot has
1752 		 * already created its own binding.  TODO: Drop the WARN when
1753 		 * dirty logging guest_memfd memslots is supported.  Until then,
1754 		 * flags-only changes on guest_memfd slots should be impossible.
1755 		 */
1756 		if (WARN_ON_ONCE(old->flags & KVM_MEM_GUEST_MEMFD))
1757 			kvm_gmem_unbind(old);
1758 
1759 		/*
1760 		 * The final quirk.  Free the detached old slot, but only its
1761 		 * memory, not any metadata.  Metadata, including arch-specific
1762 		 * data, may be reused by @new.
1763 		 */
1764 		kfree(old);
1765 		break;
1766 	default:
1767 		BUG();
1768 	}
1769 }
1770 
1771 /*
1772  * Activate @new, which must be installed in the inactive slots by the caller,
1773  * by swapping the active slots and then propagating @new to @old once @old is
1774  * unreachable and can be safely modified.
1775  *
1776  * With NULL @old this simply adds @new to @active (while swapping the sets).
1777  * With NULL @new this simply removes @old from @active and frees it
1778  * (while also swapping the sets).
1779  */
1780 static void kvm_activate_memslot(struct kvm *kvm,
1781 				 struct kvm_memory_slot *old,
1782 				 struct kvm_memory_slot *new)
1783 {
1784 	int as_id = kvm_memslots_get_as_id(old, new);
1785 
1786 	kvm_swap_active_memslots(kvm, as_id);
1787 
1788 	/* Propagate the new memslot to the now inactive memslots. */
1789 	kvm_replace_memslot(kvm, old, new);
1790 }
1791 
1792 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1793 			     const struct kvm_memory_slot *src)
1794 {
1795 	dest->base_gfn = src->base_gfn;
1796 	dest->npages = src->npages;
1797 	dest->dirty_bitmap = src->dirty_bitmap;
1798 	dest->arch = src->arch;
1799 	dest->userspace_addr = src->userspace_addr;
1800 	dest->flags = src->flags;
1801 	dest->id = src->id;
1802 	dest->as_id = src->as_id;
1803 }
1804 
1805 static void kvm_invalidate_memslot(struct kvm *kvm,
1806 				   struct kvm_memory_slot *old,
1807 				   struct kvm_memory_slot *invalid_slot)
1808 {
1809 	/*
1810 	 * Mark the current slot INVALID.  As with all memslot modifications,
1811 	 * this must be done on an unreachable slot to avoid modifying the
1812 	 * current slot in the active tree.
1813 	 */
1814 	kvm_copy_memslot(invalid_slot, old);
1815 	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1816 	kvm_replace_memslot(kvm, old, invalid_slot);
1817 
1818 	/*
1819 	 * Activate the slot that is now marked INVALID, but don't propagate
1820 	 * the slot to the now inactive slots. The slot is either going to be
1821 	 * deleted or recreated as a new slot.
1822 	 */
1823 	kvm_swap_active_memslots(kvm, old->as_id);
1824 
1825 	/*
1826 	 * From this point no new shadow pages pointing to a deleted, or moved,
1827 	 * memslot will be created.  Validation of sp->gfn happens in:
1828 	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1829 	 *	- kvm_is_visible_gfn (mmu_check_root)
1830 	 */
1831 	kvm_arch_flush_shadow_memslot(kvm, old);
1832 	kvm_arch_guest_memory_reclaimed(kvm);
1833 
1834 	/* Was released by kvm_swap_active_memslots(), reacquire. */
1835 	mutex_lock(&kvm->slots_arch_lock);
1836 
1837 	/*
1838 	 * Copy the arch-specific field of the newly-installed slot back to the
1839 	 * old slot as the arch data could have changed between releasing
1840 	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1841 	 * above.  Writers are required to retrieve memslots *after* acquiring
1842 	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1843 	 */
1844 	old->arch = invalid_slot->arch;
1845 }
1846 
1847 static void kvm_create_memslot(struct kvm *kvm,
1848 			       struct kvm_memory_slot *new)
1849 {
1850 	/* Add the new memslot to the inactive set and activate. */
1851 	kvm_replace_memslot(kvm, NULL, new);
1852 	kvm_activate_memslot(kvm, NULL, new);
1853 }
1854 
1855 static void kvm_delete_memslot(struct kvm *kvm,
1856 			       struct kvm_memory_slot *old,
1857 			       struct kvm_memory_slot *invalid_slot)
1858 {
1859 	/*
1860 	 * Remove the old memslot (in the inactive memslots) by passing NULL as
1861 	 * the "new" slot, and do the same for the invalid version in the active slots.
1862 	 */
1863 	kvm_replace_memslot(kvm, old, NULL);
1864 	kvm_activate_memslot(kvm, invalid_slot, NULL);
1865 }
1866 
1867 static void kvm_move_memslot(struct kvm *kvm,
1868 			     struct kvm_memory_slot *old,
1869 			     struct kvm_memory_slot *new,
1870 			     struct kvm_memory_slot *invalid_slot)
1871 {
1872 	/*
1873 	 * Replace the old memslot in the inactive slots, and then swap slots
1874 	 * and replace the current INVALID with the new as well.
1875 	 */
1876 	kvm_replace_memslot(kvm, old, new);
1877 	kvm_activate_memslot(kvm, invalid_slot, new);
1878 }
1879 
1880 static void kvm_update_flags_memslot(struct kvm *kvm,
1881 				     struct kvm_memory_slot *old,
1882 				     struct kvm_memory_slot *new)
1883 {
1884 	/*
1885 	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1886 	 * an intermediate step. Instead, the old memslot is simply replaced
1887 	 * with a new, updated copy in both memslot sets.
1888 	 */
1889 	kvm_replace_memslot(kvm, old, new);
1890 	kvm_activate_memslot(kvm, old, new);
1891 }
1892 
1893 static int kvm_set_memslot(struct kvm *kvm,
1894 			   struct kvm_memory_slot *old,
1895 			   struct kvm_memory_slot *new,
1896 			   enum kvm_mr_change change)
1897 {
1898 	struct kvm_memory_slot *invalid_slot;
1899 	int r;
1900 
1901 	/*
1902 	 * Released in kvm_swap_active_memslots().
1903 	 *
1904 	 * Must be held from before the current memslots are copied until after
1905 	 * the new memslots are installed with rcu_assign_pointer, then
1906 	 * released before the SRCU synchronization in kvm_swap_active_memslots().
1907 	 *
1908 	 * When modifying memslots outside of the slots_lock, must be held
1909 	 * before reading the pointer to the current memslots until after all
1910 	 * changes to those memslots are complete.
1911 	 *
1912 	 * These rules ensure that installing new memslots does not lose
1913 	 * changes made to the previous memslots.
1914 	 */
1915 	mutex_lock(&kvm->slots_arch_lock);
1916 
1917 	/*
1918 	 * Invalidate the old slot if it's being deleted or moved.  This is
1919 	 * done prior to actually deleting/moving the memslot to allow vCPUs to
1920 	 * continue running by ensuring there are no mappings or shadow pages
1921 	 * for the memslot when it is deleted/moved.  Without pre-invalidation
1922 	 * (and without a lock), a window would exist between effecting the
1923 	 * delete/move and committing the changes in arch code where KVM or a
1924 	 * guest could access a non-existent memslot.
1925 	 *
1926 	 * Modifications are done on a temporary, unreachable slot.  The old
1927 	 * slot needs to be preserved in case a later step fails and the
1928 	 * invalidation needs to be reverted.
1929 	 */
1930 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1931 		invalid_slot = kzalloc_obj(*invalid_slot, GFP_KERNEL_ACCOUNT);
1932 		if (!invalid_slot) {
1933 			mutex_unlock(&kvm->slots_arch_lock);
1934 			return -ENOMEM;
1935 		}
1936 		kvm_invalidate_memslot(kvm, old, invalid_slot);
1937 	}
1938 
1939 	r = kvm_prepare_memory_region(kvm, old, new, change);
1940 	if (r) {
1941 		/*
1942 		 * For DELETE/MOVE, revert the above INVALID change.  No
1943 		 * modifications required since the original slot was preserved
1944 		 * in the inactive slots.  Changing the active memslots also
1945 		 * releases slots_arch_lock.
1946 		 */
1947 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1948 			kvm_activate_memslot(kvm, invalid_slot, old);
1949 			kfree(invalid_slot);
1950 		} else {
1951 			mutex_unlock(&kvm->slots_arch_lock);
1952 		}
1953 		return r;
1954 	}
1955 
1956 	/*
1957 	 * For DELETE and MOVE, the temporary slot is now active as the INVALID
1958 	 * version of the old slot.  MOVE is particularly special as it reuses
1959 	 * the old slot and keeps a copy of the old slot (in @invalid_slot).
1960 	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1961 	 * old slot is detached but otherwise preserved.
1962 	 */
1963 	if (change == KVM_MR_CREATE)
1964 		kvm_create_memslot(kvm, new);
1965 	else if (change == KVM_MR_DELETE)
1966 		kvm_delete_memslot(kvm, old, invalid_slot);
1967 	else if (change == KVM_MR_MOVE)
1968 		kvm_move_memslot(kvm, old, new, invalid_slot);
1969 	else if (change == KVM_MR_FLAGS_ONLY)
1970 		kvm_update_flags_memslot(kvm, old, new);
1971 	else
1972 		BUG();
1973 
1974 	/* Free the temporary INVALID slot used for DELETE and MOVE. */
1975 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1976 		kfree(invalid_slot);
1977 
1978 	/*
1979 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
1980 	 * will directly hit the final, active memslot.  Architectures are
1981 	 * responsible for knowing that new->arch may be stale.
1982 	 */
1983 	kvm_commit_memory_region(kvm, old, new, change);
1984 
1985 	return 0;
1986 }
1987 
1988 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1989 				      gfn_t start, gfn_t end)
1990 {
1991 	struct kvm_memslot_iter iter;
1992 
1993 	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1994 		if (iter.slot->id != id)
1995 			return true;
1996 	}
1997 
1998 	return false;
1999 }
2000 
2001 static int kvm_set_memory_region(struct kvm *kvm,
2002 				 const struct kvm_userspace_memory_region2 *mem)
2003 {
2004 	struct kvm_memory_slot *old, *new;
2005 	struct kvm_memslots *slots;
2006 	enum kvm_mr_change change;
2007 	unsigned long npages;
2008 	gfn_t base_gfn;
2009 	int as_id, id;
2010 	int r;
2011 
2012 	lockdep_assert_held(&kvm->slots_lock);
2013 
2014 	r = check_memory_region_flags(kvm, mem);
2015 	if (r)
2016 		return r;
2017 
2018 	as_id = mem->slot >> 16;
2019 	id = (u16)mem->slot;
2020 
2021 	/* General sanity checks */
2022 	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2023 	    (mem->memory_size != (unsigned long)mem->memory_size))
2024 		return -EINVAL;
2025 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2026 		return -EINVAL;
2027 	/* We can read the guest memory with __xxx_user() later on. */
2028 	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2029 	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2030 	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2031 			mem->memory_size))
2032 		return -EINVAL;
2033 	if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2034 	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2035 	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2036 		return -EINVAL;
2037 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2038 		return -EINVAL;
2039 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2040 		return -EINVAL;
2041 
2042 	/*
2043 	 * The size of userspace-defined memory regions is restricted in order
2044 	 * to play nice with dirty bitmap operations, which are indexed with an
2045 	 * "unsigned int".  KVM's internal memory regions don't support dirty
2046 	 * logging, and so are exempt.
2047 	 */
2048 	if (id < KVM_USER_MEM_SLOTS &&
2049 	    (mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2050 		return -EINVAL;
2051 
2052 	slots = __kvm_memslots(kvm, as_id);
2053 
2054 	/*
2055 	 * Note, the old memslot (and the pointer itself!) may be invalidated
2056 	 * and/or destroyed by kvm_set_memslot().
2057 	 */
2058 	old = id_to_memslot(slots, id);
2059 
2060 	if (!mem->memory_size) {
2061 		if (!old || !old->npages)
2062 			return -EINVAL;
2063 
2064 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2065 			return -EIO;
2066 
2067 		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2068 	}
2069 
2070 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2071 	npages = (mem->memory_size >> PAGE_SHIFT);
2072 
2073 	if (!old || !old->npages) {
2074 		change = KVM_MR_CREATE;
2075 
2076 		/*
2077 		 * To simplify KVM internals, the total number of pages across
2078 		 * all memslots must fit in an unsigned long.
2079 		 */
2080 		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2081 			return -EINVAL;
2082 	} else { /* Modify an existing slot. */
2083 		/* Private memslots are immutable; they can only be deleted. */
2084 		if (mem->flags & KVM_MEM_GUEST_MEMFD)
2085 			return -EINVAL;
2086 		if ((mem->userspace_addr != old->userspace_addr) ||
2087 		    (npages != old->npages) ||
2088 		    ((mem->flags ^ old->flags) & (KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD)))
2089 			return -EINVAL;
2090 
2091 		if (base_gfn != old->base_gfn)
2092 			change = KVM_MR_MOVE;
2093 		else if (mem->flags != old->flags)
2094 			change = KVM_MR_FLAGS_ONLY;
2095 		else /* Nothing to change. */
2096 			return 0;
2097 	}
2098 
2099 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2100 	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2101 		return -EEXIST;
2102 
2103 	/* Allocate a slot that will persist in the memslot. */
2104 	new = kzalloc_obj(*new, GFP_KERNEL_ACCOUNT);
2105 	if (!new)
2106 		return -ENOMEM;
2107 
2108 	new->as_id = as_id;
2109 	new->id = id;
2110 	new->base_gfn = base_gfn;
2111 	new->npages = npages;
2112 	new->flags = mem->flags;
2113 	new->userspace_addr = mem->userspace_addr;
2114 	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2115 		r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2116 		if (r)
2117 			goto out;
2118 	}
2119 
2120 	r = kvm_set_memslot(kvm, old, new, change);
2121 	if (r)
2122 		goto out_unbind;
2123 
2124 	return 0;
2125 
2126 out_unbind:
2127 	if (mem->flags & KVM_MEM_GUEST_MEMFD)
2128 		kvm_gmem_unbind(new);
2129 out:
2130 	kfree(new);
2131 	return r;
2132 }
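/*
 * Editor's note: a small sketch (not kernel code) of the @mem->slot encoding
 * decoded above: the address space id lives in the upper 16 bits, the slot id
 * in the lower 16.  The helper name is hypothetical.
 */
#if 0	/* illustrative only */
static u32 example_pack_slot(u16 as_id, u16 id)
{
	/* Mirrors "as_id = mem->slot >> 16; id = (u16)mem->slot;" above. */
	return ((u32)as_id << 16) | id;
}
#endif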
2133 
2134 int kvm_set_internal_memslot(struct kvm *kvm,
2135 			     const struct kvm_userspace_memory_region2 *mem)
2136 {
2137 	if (WARN_ON_ONCE(mem->slot < KVM_USER_MEM_SLOTS))
2138 		return -EINVAL;
2139 
2140 	if (WARN_ON_ONCE(mem->flags))
2141 		return -EINVAL;
2142 
2143 	return kvm_set_memory_region(kvm, mem);
2144 }
2145 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_internal_memslot);
2146 
2147 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2148 					  struct kvm_userspace_memory_region2 *mem)
2149 {
2150 	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2151 		return -EINVAL;
2152 
2153 	guard(mutex)(&kvm->slots_lock);
2154 	return kvm_set_memory_region(kvm, mem);
2155 }
2156 
2157 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2158 /**
2159  * kvm_get_dirty_log - get a snapshot of dirty pages
2160  * @kvm:	pointer to kvm instance
2161  * @log:	slot id and address to which we copy the log
2162  * @is_dirty:	set to '1' if any dirty pages were found
2163  * @memslot:	set to the associated memslot, always valid on success
2164  */
2165 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2166 		      int *is_dirty, struct kvm_memory_slot **memslot)
2167 {
2168 	struct kvm_memslots *slots;
2169 	int i, as_id, id;
2170 	unsigned long n;
2171 	unsigned long any = 0;
2172 
2173 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2174 	if (!kvm_use_dirty_bitmap(kvm))
2175 		return -ENXIO;
2176 
2177 	*memslot = NULL;
2178 	*is_dirty = 0;
2179 
2180 	as_id = log->slot >> 16;
2181 	id = (u16)log->slot;
2182 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2183 		return -EINVAL;
2184 
2185 	slots = __kvm_memslots(kvm, as_id);
2186 	*memslot = id_to_memslot(slots, id);
2187 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
2188 		return -ENOENT;
2189 
2190 	kvm_arch_sync_dirty_log(kvm, *memslot);
2191 
2192 	n = kvm_dirty_bitmap_bytes(*memslot);
2193 
2194 	for (i = 0; !any && i < n/sizeof(long); ++i)
2195 		any = (*memslot)->dirty_bitmap[i];
2196 
2197 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2198 		return -EFAULT;
2199 
2200 	if (any)
2201 		*is_dirty = 1;
2202 	return 0;
2203 }
2204 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dirty_log);
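/*
 * Editor's note: a hedged userspace sketch (not kernel code) of fetching a
 * slot's dirty bitmap.  The vm_fd, slot_id and npages variables are
 * hypothetical; the buffer must hold one bit per page, rounded up to a
 * 64-page multiple, matching kvm_dirty_bitmap_bytes().
 */
#if 0	/* illustrative only */
	unsigned long bitmap[(npages + 63) / 64];
	struct kvm_dirty_log log = {
		.slot = slot_id,		/* (as_id << 16) | id */
		.dirty_bitmap = bitmap,
	};

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log))
		perror("KVM_GET_DIRTY_LOG");	/* e.g. ENOENT: no bitmap */
#endif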
2205 
2206 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2207 /**
2208  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2209  *	and reenable dirty page tracking for the corresponding pages.
2210  * @kvm:	pointer to kvm instance
2211  * @log:	slot id and address to which we copy the log
2212  *
2213  * Keep in mind that VCPU threads can write to the bitmap concurrently.
2214  * So, to avoid losing track of dirty pages, we keep the following
2215  * order:
2216  *
2217  *    1. Take a snapshot of the bit and clear it if needed.
2218  *    2. Write protect the corresponding page.
2219  *    3. Copy the snapshot to userspace.
2220  *    4. Upon return, the caller flushes TLBs if needed.
2221  *
2222  * Between 2 and 4, the guest may write to the page using the remaining TLB
2223  * entry.  This is not a problem because the page is reported dirty using
2224  * the snapshot taken before and step 4 ensures that writes done after
2225  * exiting to userspace will be logged for the next call.
2226  *
2227  */
2228 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2229 {
2230 	struct kvm_memslots *slots;
2231 	struct kvm_memory_slot *memslot;
2232 	int i, as_id, id;
2233 	unsigned long n;
2234 	unsigned long *dirty_bitmap;
2235 	unsigned long *dirty_bitmap_buffer;
2236 	bool flush;
2237 
2238 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2239 	if (!kvm_use_dirty_bitmap(kvm))
2240 		return -ENXIO;
2241 
2242 	as_id = log->slot >> 16;
2243 	id = (u16)log->slot;
2244 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2245 		return -EINVAL;
2246 
2247 	slots = __kvm_memslots(kvm, as_id);
2248 	memslot = id_to_memslot(slots, id);
2249 	if (!memslot || !memslot->dirty_bitmap)
2250 		return -ENOENT;
2251 
2252 	dirty_bitmap = memslot->dirty_bitmap;
2253 
2254 	kvm_arch_sync_dirty_log(kvm, memslot);
2255 
2256 	n = kvm_dirty_bitmap_bytes(memslot);
2257 	flush = false;
2258 	if (kvm->manual_dirty_log_protect) {
2259 		/*
2260 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2261 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2262 		 * is some code duplication between this function and
2263 		 * kvm_get_dirty_log, but hopefully all architectures will
2264 		 * transition to kvm_get_dirty_log_protect so that
2265 		 * kvm_get_dirty_log can be eliminated.
2266 		 */
2267 		dirty_bitmap_buffer = dirty_bitmap;
2268 	} else {
2269 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2270 		memset(dirty_bitmap_buffer, 0, n);
2271 
2272 		KVM_MMU_LOCK(kvm);
2273 		for (i = 0; i < n / sizeof(long); i++) {
2274 			unsigned long mask;
2275 			gfn_t offset;
2276 
2277 			if (!dirty_bitmap[i])
2278 				continue;
2279 
2280 			flush = true;
2281 			mask = xchg(&dirty_bitmap[i], 0);
2282 			dirty_bitmap_buffer[i] = mask;
2283 
2284 			offset = i * BITS_PER_LONG;
2285 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2286 								offset, mask);
2287 		}
2288 		KVM_MMU_UNLOCK(kvm);
2289 	}
2290 
2291 	if (flush)
2292 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2293 
2294 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2295 		return -EFAULT;
2296 	return 0;
2297 }
2298 
2299 
2300 /**
2301  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2302  * @kvm: kvm instance
2303  * @log: slot id and address to which we copy the log
2304  *
2305  * Steps 1-4 below provide general overview of dirty page logging. See
2306  * kvm_get_dirty_log_protect() function description for additional details.
2307  *
2308  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2309  * always flush the TLB (step 4) even if a previous step failed and the dirty
2310  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2311  * API does not preclude a subsequent dirty log read by userspace. Flushing
2312  * the TLB ensures writes will be marked dirty for the next log read.
2313  *
2314  *   1. Take a snapshot of the bit and clear it if needed.
2315  *   2. Write protect the corresponding page.
2316  *   3. Copy the snapshot to the userspace.
2317  *   4. Flush TLB's if needed.
2318  */
2319 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2320 				      struct kvm_dirty_log *log)
2321 {
2322 	int r;
2323 
2324 	mutex_lock(&kvm->slots_lock);
2325 
2326 	r = kvm_get_dirty_log_protect(kvm, log);
2327 
2328 	mutex_unlock(&kvm->slots_lock);
2329 	return r;
2330 }
2331 
2332 /**
2333  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2334  *	and reenable dirty page tracking for the corresponding pages.
2335  * @kvm:	pointer to kvm instance
2336  * @log:	slot id and address from which to fetch the bitmap of dirty pages
2337  */
2338 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2339 				       struct kvm_clear_dirty_log *log)
2340 {
2341 	struct kvm_memslots *slots;
2342 	struct kvm_memory_slot *memslot;
2343 	int as_id, id;
2344 	gfn_t offset;
2345 	unsigned long i, n;
2346 	unsigned long *dirty_bitmap;
2347 	unsigned long *dirty_bitmap_buffer;
2348 	bool flush;
2349 
2350 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2351 	if (!kvm_use_dirty_bitmap(kvm))
2352 		return -ENXIO;
2353 
2354 	as_id = log->slot >> 16;
2355 	id = (u16)log->slot;
2356 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2357 		return -EINVAL;
2358 
2359 	if (log->first_page & 63)
2360 		return -EINVAL;
2361 
2362 	slots = __kvm_memslots(kvm, as_id);
2363 	memslot = id_to_memslot(slots, id);
2364 	if (!memslot || !memslot->dirty_bitmap)
2365 		return -ENOENT;
2366 
2367 	dirty_bitmap = memslot->dirty_bitmap;
2368 
2369 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2370 
2371 	if (log->first_page > memslot->npages ||
2372 	    log->num_pages > memslot->npages - log->first_page ||
2373 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2374 		return -EINVAL;
2375 
2376 	kvm_arch_sync_dirty_log(kvm, memslot);
2377 
2378 	flush = false;
2379 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2380 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2381 		return -EFAULT;
2382 
2383 	KVM_MMU_LOCK(kvm);
2384 	for (offset = log->first_page, i = offset / BITS_PER_LONG,
2385 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2386 	     i++, offset += BITS_PER_LONG) {
2387 		unsigned long mask = *dirty_bitmap_buffer++;
2388 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2389 		if (!mask)
2390 			continue;
2391 
2392 		mask &= atomic_long_fetch_andnot(mask, p);
2393 
2394 		/*
2395 		 * mask contains the bits that really have been cleared.  This
2396 		 * never includes any bits beyond the length of the memslot (if
2397 		 * the length is not aligned to 64 pages), therefore it is not
2398 		 * a problem if userspace sets them in log->dirty_bitmap.
2399 		 */
2400 		if (mask) {
2401 			flush = true;
2402 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2403 								offset, mask);
2404 		}
2405 	}
2406 	KVM_MMU_UNLOCK(kvm);
2407 
2408 	if (flush)
2409 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2410 
2411 	return 0;
2412 }
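/*
 * Editor's note: a hedged userspace sketch (not kernel code) of the alignment
 * rules enforced above: first_page must be a multiple of 64, and num_pages
 * must be a multiple of 64 unless the range runs to the end of the memslot.
 * The vm_fd, slot_id and bitmap variables are hypothetical.
 */
#if 0	/* illustrative only */
	struct kvm_clear_dirty_log clear = {
		.slot = slot_id,
		.first_page = 0,	/* must be 64-page aligned */
		.num_pages = 256,	/* multiple of 64, or reaches slot end */
		.dirty_bitmap = bitmap,	/* bits to clear, from a prior GET */
	};

	ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
#endif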
2413 
2414 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2415 					struct kvm_clear_dirty_log *log)
2416 {
2417 	int r;
2418 
2419 	mutex_lock(&kvm->slots_lock);
2420 
2421 	r = kvm_clear_dirty_log_protect(kvm, log);
2422 
2423 	mutex_unlock(&kvm->slots_lock);
2424 	return r;
2425 }
2426 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2427 
2428 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2429 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2430 {
2431 	if (!kvm || kvm_arch_has_private_mem(kvm))
2432 		return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2433 
2434 	return 0;
2435 }
2436 
2437 /*
2438  * Returns true if _all_ gfns in the range [@start, @end) have attributes
2439  * such that the bits in @mask match @attrs.
2440  */
2441 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2442 				     unsigned long mask, unsigned long attrs)
2443 {
2444 	XA_STATE(xas, &kvm->mem_attr_array, start);
2445 	unsigned long index;
2446 	void *entry;
2447 
2448 	mask &= kvm_supported_mem_attributes(kvm);
2449 	if (attrs & ~mask)
2450 		return false;
2451 
2452 	if (end == start + 1)
2453 		return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
2454 
2455 	guard(rcu)();
2456 	if (!attrs)
2457 		return !xas_find(&xas, end - 1);
2458 
2459 	for (index = start; index < end; index++) {
2460 		do {
2461 			entry = xas_next(&xas);
2462 		} while (xas_retry(&xas, entry));
2463 
2464 		if (xas.xa_index != index ||
2465 		    (xa_to_value(entry) & mask) != attrs)
2466 			return false;
2467 	}
2468 
2469 	return true;
2470 }
2471 
2472 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2473 						 struct kvm_mmu_notifier_range *range)
2474 {
2475 	struct kvm_gfn_range gfn_range;
2476 	struct kvm_memory_slot *slot;
2477 	struct kvm_memslots *slots;
2478 	struct kvm_memslot_iter iter;
2479 	bool found_memslot = false;
2480 	bool ret = false;
2481 	int i;
2482 
2483 	gfn_range.arg = range->arg;
2484 	gfn_range.may_block = range->may_block;
2485 
2486 	/*
2487 	 * If/when KVM supports more attributes beyond private vs. shared, this
2488 	 * _could_ set KVM_FILTER_{SHARED,PRIVATE} appropriately if the entire target
2489 	 * range already has the desired private vs. shared state (it's unclear
2490 	 * if that is a net win).  For now, KVM reaches this point if and only
2491 	 * if the private flag is being toggled, i.e. all mappings are in play.
2492 	 */
2493 
2494 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2495 		slots = __kvm_memslots(kvm, i);
2496 
2497 		kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2498 			slot = iter.slot;
2499 			gfn_range.slot = slot;
2500 
2501 			gfn_range.start = max(range->start, slot->base_gfn);
2502 			gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2503 			if (gfn_range.start >= gfn_range.end)
2504 				continue;
2505 
2506 			if (!found_memslot) {
2507 				found_memslot = true;
2508 				KVM_MMU_LOCK(kvm);
2509 				if (!IS_KVM_NULL_FN(range->on_lock))
2510 					range->on_lock(kvm);
2511 			}
2512 
2513 			ret |= range->handler(kvm, &gfn_range);
2514 		}
2515 	}
2516 
2517 	if (range->flush_on_ret && ret)
2518 		kvm_flush_remote_tlbs(kvm);
2519 
2520 	if (found_memslot)
2521 		KVM_MMU_UNLOCK(kvm);
2522 }
2523 
2524 static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2525 					  struct kvm_gfn_range *range)
2526 {
2527 	/*
2528 	 * Unconditionally add the range to the invalidation set, regardless of
2529 	 * whether or not the arch callback actually needs to zap SPTEs.  E.g.
2530 	 * if KVM supports RWX attributes in the future and the attributes are
2531 	 * going from R=>RW, zapping isn't strictly necessary.  Unconditionally
2532 	 * adding the range allows KVM to require that MMU invalidations add at
2533 	 * least one range between begin() and end(), e.g. allows KVM to detect
2534 	 * bugs where the add() is missed.  Relaxing the rule *might* be safe,
2535 	 * but it's not obvious that allowing new mappings while the attributes
2536 	 * are in flux is desirable or worth the complexity.
2537 	 */
2538 	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2539 
2540 	return kvm_arch_pre_set_memory_attributes(kvm, range);
2541 }
2542 
2543 /* Set @attributes for the gfn range [@start, @end). */
2544 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2545 				     unsigned long attributes)
2546 {
2547 	struct kvm_mmu_notifier_range pre_set_range = {
2548 		.start = start,
2549 		.end = end,
2550 		.arg.attributes = attributes,
2551 		.handler = kvm_pre_set_memory_attributes,
2552 		.on_lock = kvm_mmu_invalidate_begin,
2553 		.flush_on_ret = true,
2554 		.may_block = true,
2555 	};
2556 	struct kvm_mmu_notifier_range post_set_range = {
2557 		.start = start,
2558 		.end = end,
2559 		.arg.attributes = attributes,
2560 		.handler = kvm_arch_post_set_memory_attributes,
2561 		.on_lock = kvm_mmu_invalidate_end,
2562 		.may_block = true,
2563 	};
2564 	unsigned long i;
2565 	void *entry;
2566 	int r = 0;
2567 
2568 	entry = attributes ? xa_mk_value(attributes) : NULL;
2569 
2570 	trace_kvm_vm_set_mem_attributes(start, end, attributes);
2571 
2572 	mutex_lock(&kvm->slots_lock);
2573 
2574 	/* Nothing to do if the entire range has the desired attributes. */
2575 	if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
2576 		goto out_unlock;
2577 
2578 	/*
2579 	 * Reserve memory ahead of time to avoid having to deal with failures
2580 	 * partway through setting the new attributes.
2581 	 */
2582 	for (i = start; i < end; i++) {
2583 		r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2584 		if (r)
2585 			goto out_unlock;
2586 
2587 		cond_resched();
2588 	}
2589 
2590 	kvm_handle_gfn_range(kvm, &pre_set_range);
2591 
2592 	for (i = start; i < end; i++) {
2593 		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2594 				    GFP_KERNEL_ACCOUNT));
2595 		KVM_BUG_ON(r, kvm);
2596 		cond_resched();
2597 	}
2598 
2599 	kvm_handle_gfn_range(kvm, &post_set_range);
2600 
2601 out_unlock:
2602 	mutex_unlock(&kvm->slots_lock);
2603 
2604 	return r;
2605 }
2606 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2607 					   struct kvm_memory_attributes *attrs)
2608 {
2609 	gfn_t start, end;
2610 
2611 	/* flags is currently not used. */
2612 	if (attrs->flags)
2613 		return -EINVAL;
2614 	if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2615 		return -EINVAL;
2616 	if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2617 		return -EINVAL;
2618 	if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2619 		return -EINVAL;
2620 
2621 	start = attrs->address >> PAGE_SHIFT;
2622 	end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2623 
2624 	/*
2625 	 * xarray tracks data using "unsigned long", and as a result so does
2626 	 * KVM.  For simplicity, generic attributes are supported only on
2627 	 * 64-bit architectures.
2628 	 */
2629 	BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2630 
2631 	return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2632 }
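/*
 * Editor's note: a hedged userspace sketch (not kernel code) of flipping a
 * page-aligned GPA range to private.  flags must be zero and the range must
 * not wrap; the vm_fd variable is hypothetical.
 */
#if 0	/* illustrative only */
	struct kvm_memory_attributes attrs = {
		.address = 0x100000,	/* page-aligned GPA */
		.size = 0x200000,	/* page-aligned, non-zero */
		.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
	};

	ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
#endif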
2633 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2634 
2635 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2636 {
2637 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2638 }
2639 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_memslot);
2640 
2641 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2642 {
2643 	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2644 	u64 gen = slots->generation;
2645 	struct kvm_memory_slot *slot;
2646 
2647 	/*
2648 	 * This also protects against using a memslot from a different address space,
2649 	 * since different address spaces have different generation numbers.
2650 	 */
2651 	if (unlikely(gen != vcpu->last_used_slot_gen)) {
2652 		vcpu->last_used_slot = NULL;
2653 		vcpu->last_used_slot_gen = gen;
2654 	}
2655 
2656 	slot = try_get_memslot(vcpu->last_used_slot, gfn);
2657 	if (slot)
2658 		return slot;
2659 
2660 	/*
2661 	 * Fall back to searching all memslots. We purposely use
2662 	 * search_memslots() instead of __gfn_to_memslot() to avoid
2663 	 * thrashing the VM-wide last_used_slot in kvm_memslots.
2664 	 */
2665 	slot = search_memslots(slots, gfn, false);
2666 	if (slot) {
2667 		vcpu->last_used_slot = slot;
2668 		return slot;
2669 	}
2670 
2671 	return NULL;
2672 }
2673 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_memslot);
2674 
2675 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2676 {
2677 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2678 
2679 	return kvm_is_visible_memslot(memslot);
2680 }
2681 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_visible_gfn);
2682 
2683 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2684 {
2685 	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2686 
2687 	return kvm_is_visible_memslot(memslot);
2688 }
2689 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_visible_gfn);
2690 
2691 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2692 {
2693 	struct vm_area_struct *vma;
2694 	unsigned long addr, size;
2695 
2696 	size = PAGE_SIZE;
2697 
2698 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2699 	if (kvm_is_error_hva(addr))
2700 		return PAGE_SIZE;
2701 
2702 	mmap_read_lock(current->mm);
2703 	vma = find_vma(current->mm, addr);
2704 	if (!vma)
2705 		goto out;
2706 
2707 	size = vma_kernel_pagesize(vma);
2708 
2709 out:
2710 	mmap_read_unlock(current->mm);
2711 
2712 	return size;
2713 }
2714 
2715 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2716 {
2717 	return slot->flags & KVM_MEM_READONLY;
2718 }
2719 
2720 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2721 				       gfn_t *nr_pages, bool write)
2722 {
2723 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2724 		return KVM_HVA_ERR_BAD;
2725 
2726 	if (memslot_is_readonly(slot) && write)
2727 		return KVM_HVA_ERR_RO_BAD;
2728 
2729 	if (nr_pages)
2730 		*nr_pages = slot->npages - (gfn - slot->base_gfn);
2731 
2732 	return __gfn_to_hva_memslot(slot, gfn);
2733 }
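/*
 * Editor's note: once the checks above pass, the translation performed by
 * __gfn_to_hva_memslot() is plain linear arithmetic within the slot, roughly
 * as sketched below (illustrative only; the helper name is hypothetical).
 */
#if 0	/* illustrative only */
static unsigned long example_gfn_to_hva(const struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	/* Offset of @gfn within the slot, scaled to bytes. */
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
#endif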
2734 
2735 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2736 				     gfn_t *nr_pages)
2737 {
2738 	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2739 }
2740 
2741 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2742 					gfn_t gfn)
2743 {
2744 	return gfn_to_hva_many(slot, gfn, NULL);
2745 }
2746 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva_memslot);
2747 
2748 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2749 {
2750 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2751 }
2752 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva);
2753 
2754 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2755 {
2756 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2757 }
2758 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_hva);
2759 
2760 /*
2761  * Return the hva of a @gfn and the R/W attribute if possible.
2762  *
2763  * @slot: the kvm_memory_slot which contains @gfn
2764  * @gfn: the gfn to be translated
2765  * @writable: used to return the read/write attribute of the @slot if the hva
2766  * is valid and @writable is not NULL
2767  */
2768 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2769 				      gfn_t gfn, bool *writable)
2770 {
2771 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2772 
2773 	if (!kvm_is_error_hva(hva) && writable)
2774 		*writable = !memslot_is_readonly(slot);
2775 
2776 	return hva;
2777 }
2778 
2779 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2780 {
2781 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2782 
2783 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2784 }
2785 
2786 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2787 {
2788 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2789 
2790 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2791 }
2792 
2793 static bool kvm_is_ad_tracked_page(struct page *page)
2794 {
2795 	/*
2796 	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
2797 	 * touched (e.g. set dirty) except by its owner".
2798 	 */
2799 	return !PageReserved(page);
2800 }
2801 
2802 static void kvm_set_page_dirty(struct page *page)
2803 {
2804 	if (kvm_is_ad_tracked_page(page))
2805 		SetPageDirty(page);
2806 }
2807 
2808 static void kvm_set_page_accessed(struct page *page)
2809 {
2810 	if (kvm_is_ad_tracked_page(page))
2811 		mark_page_accessed(page);
2812 }
2813 
2814 void kvm_release_page_clean(struct page *page)
2815 {
2816 	if (!page)
2817 		return;
2818 
2819 	kvm_set_page_accessed(page);
2820 	put_page(page);
2821 }
2822 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_clean);
2823 
2824 void kvm_release_page_dirty(struct page *page)
2825 {
2826 	if (!page)
2827 		return;
2828 
2829 	kvm_set_page_dirty(page);
2830 	kvm_release_page_clean(page);
2831 }
2832 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_dirty);
2833 
2834 static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
2835 				 struct follow_pfnmap_args *map, bool writable)
2836 {
2837 	kvm_pfn_t pfn;
2838 
2839 	WARN_ON_ONCE(!!page == !!map);
2840 
2841 	if (kfp->map_writable)
2842 		*kfp->map_writable = writable;
2843 
2844 	if (map)
2845 		pfn = map->pfn;
2846 	else
2847 		pfn = page_to_pfn(page);
2848 
2849 	*kfp->refcounted_page = page;
2850 
2851 	return pfn;
2852 }
2853 
2854 /*
2855  * The fast path to get the writable pfn, which will be stored in @pfn;
2856  * true indicates success, otherwise false is returned.
2857  */
2858 static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2859 {
2860 	struct page *page;
2861 	bool r;
2862 
2863 	/*
2864 	 * Try the fast-only path when the caller wants to pin/get the page for
2865 	 * writing.  If the caller only wants to read the page, KVM must go
2866 	 * down the full, slow path in order to avoid racing an operation that
2867 	 * breaks Copy-on-Write (CoW), e.g. so that KVM doesn't end up pointing
2868 	 * at the old, read-only page while mm/ points at a new, writable page.
2869 	 */
2870 	if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
2871 		return false;
2872 
2873 	if (kfp->pin)
2874 		r = pin_user_pages_fast(kfp->hva, 1, FOLL_WRITE, &page) == 1;
2875 	else
2876 		r = get_user_page_fast_only(kfp->hva, FOLL_WRITE, &page);
2877 
2878 	if (r) {
2879 		*pfn = kvm_resolve_pfn(kfp, page, NULL, true);
2880 		return true;
2881 	}
2882 
2883 	return false;
2884 }
2885 
2886 /*
2887  * The slow path to get the pfn of the specified host virtual address;
2888  * 1 indicates success, -errno is returned if an error is detected.
2889  */
2890 static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2891 {
2892 	/*
2893 	 * When a VCPU accesses a page that is not mapped into the secondary
2894 	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2895 	 * make progress. We always want to honor NUMA hinting faults in that
2896 	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2897 	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2898 	 * mapped into the secondary MMU and gets accessed by a VCPU.
2899 	 *
2900 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2901 	 * implicitly honor NUMA hinting faults and don't need this flag.
2902 	 */
2903 	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
2904 	struct page *page, *wpage;
2905 	int npages;
2906 
2907 	if (kfp->pin)
2908 		npages = pin_user_pages_unlocked(kfp->hva, 1, &page, flags);
2909 	else
2910 		npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
2911 	if (npages != 1)
2912 		return npages;
2913 
2914 	/*
2915 	 * Pinning is mutually exclusive with opportunistically mapping a read
2916 	 * fault as writable, as KVM should never pin pages when mapping memory
2917 	 * into the guest (pinning is only for direct accesses from KVM).
2918 	 */
2919 	if (WARN_ON_ONCE(kfp->map_writable && kfp->pin))
2920 		goto out;
2921 
2922 	/* map read fault as writable if possible */
2923 	if (!(flags & FOLL_WRITE) && kfp->map_writable &&
2924 	    get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
2925 		put_page(page);
2926 		page = wpage;
2927 		flags |= FOLL_WRITE;
2928 	}
2929 
2930 out:
2931 	*pfn = kvm_resolve_pfn(kfp, page, NULL, flags & FOLL_WRITE);
2932 	return npages;
2933 }
2934 
2935 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2936 {
2937 	if (unlikely(!(vma->vm_flags & VM_READ)))
2938 		return false;
2939 
2940 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2941 		return false;
2942 
2943 	return true;
2944 }
2945 
2946 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2947 			       struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
2948 {
2949 	struct follow_pfnmap_args args = { .vma = vma, .address = kfp->hva };
2950 	bool write_fault = kfp->flags & FOLL_WRITE;
2951 	int r;
2952 
2953 	/*
2954 	 * Remapped memory cannot be pinned in any meaningful sense.  Bail if
2955 	 * the caller wants to pin the page, i.e. access the page outside of
2956 	 * MMU notifier protection, and unsafe mappings are disallowed.
2957 	 */
2958 	if (kfp->pin && !allow_unsafe_mappings)
2959 		return -EINVAL;
2960 
2961 	r = follow_pfnmap_start(&args);
2962 	if (r) {
2963 		/*
2964 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2965 		 * not call the fault handler, so do it here.
2966 		 */
2967 		bool unlocked = false;
2968 		r = fixup_user_fault(current->mm, kfp->hva,
2969 				     (write_fault ? FAULT_FLAG_WRITE : 0),
2970 				     &unlocked);
2971 		if (unlocked)
2972 			return -EAGAIN;
2973 		if (r)
2974 			return r;
2975 
2976 		r = follow_pfnmap_start(&args);
2977 		if (r)
2978 			return r;
2979 	}
2980 
2981 	if (write_fault && !args.writable) {
2982 		*p_pfn = KVM_PFN_ERR_RO_FAULT;
2983 		goto out;
2984 	}
2985 
2986 	*p_pfn = kvm_resolve_pfn(kfp, NULL, &args, args.writable);
2987 out:
2988 	follow_pfnmap_end(&args);
2989 	return r;
2990 }
2991 
2992 kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
2993 {
2994 	struct vm_area_struct *vma;
2995 	kvm_pfn_t pfn;
2996 	int npages, r;
2997 
2998 	might_sleep();
2999 
3000 	if (WARN_ON_ONCE(!kfp->refcounted_page))
3001 		return KVM_PFN_ERR_FAULT;
3002 
3003 	if (hva_to_pfn_fast(kfp, &pfn))
3004 		return pfn;
3005 
3006 	npages = hva_to_pfn_slow(kfp, &pfn);
3007 	if (npages == 1)
3008 		return pfn;
3009 	if (npages == -EINTR || npages == -EAGAIN)
3010 		return KVM_PFN_ERR_SIGPENDING;
3011 	if (npages == -EHWPOISON)
3012 		return KVM_PFN_ERR_HWPOISON;
3013 
3014 	mmap_read_lock(current->mm);
3015 retry:
3016 	vma = vma_lookup(current->mm, kfp->hva);
3017 
3018 	if (vma == NULL)
3019 		pfn = KVM_PFN_ERR_FAULT;
3020 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
3021 		r = hva_to_pfn_remapped(vma, kfp, &pfn);
3022 		if (r == -EAGAIN)
3023 			goto retry;
3024 		if (r < 0)
3025 			pfn = KVM_PFN_ERR_FAULT;
3026 	} else {
3027 		if ((kfp->flags & FOLL_NOWAIT) &&
3028 		    vma_is_valid(vma, kfp->flags & FOLL_WRITE))
3029 			pfn = KVM_PFN_ERR_NEEDS_IO;
3030 		else
3031 			pfn = KVM_PFN_ERR_FAULT;
3032 	}
3033 	mmap_read_unlock(current->mm);
3034 	return pfn;
3035 }
3036 
3037 static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
3038 {
3039 	kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL,
3040 				     kfp->flags & FOLL_WRITE);
3041 
3042 	if (kfp->hva == KVM_HVA_ERR_RO_BAD)
3043 		return KVM_PFN_ERR_RO_FAULT;
3044 
3045 	if (kvm_is_error_hva(kfp->hva))
3046 		return KVM_PFN_NOSLOT;
3047 
3048 	if (memslot_is_readonly(kfp->slot) && kfp->map_writable) {
3049 		*kfp->map_writable = false;
3050 		kfp->map_writable = NULL;
3051 	}
3052 
3053 	return hva_to_pfn(kfp);
3054 }
3055 
3056 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
3057 			    unsigned int foll, bool *writable,
3058 			    struct page **refcounted_page)
3059 {
3060 	struct kvm_follow_pfn kfp = {
3061 		.slot = slot,
3062 		.gfn = gfn,
3063 		.flags = foll,
3064 		.map_writable = writable,
3065 		.refcounted_page = refcounted_page,
3066 	};
3067 
3068 	if (WARN_ON_ONCE(!writable || !refcounted_page))
3069 		return KVM_PFN_ERR_FAULT;
3070 
3071 	*writable = false;
3072 	*refcounted_page = NULL;
3073 
3074 	return kvm_follow_pfn(&kfp);
3075 }
3076 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_faultin_pfn);
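/*
 * Editor's note: a hedged sketch (not kernel code) of a typical fault-in
 * sequence.  The release pairing shown is an assumption: the refcounted page,
 * if any, is dropped via kvm_release_page_clean() or kvm_release_page_dirty()
 * once the pfn has been consumed.
 */
#if 0	/* illustrative only */
	struct page *refcounted_page;
	bool writable;
	kvm_pfn_t pfn;

	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable,
				&refcounted_page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* ... install @pfn in the stage-2 page tables ... */

	if (refcounted_page)
		kvm_release_page_clean(refcounted_page);
#endif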
3077 
3078 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
3079 		       struct page **pages, int nr_pages)
3080 {
3081 	unsigned long addr;
3082 	gfn_t entry = 0;
3083 
3084 	addr = gfn_to_hva_many(slot, gfn, &entry);
3085 	if (kvm_is_error_hva(addr))
3086 		return -1;
3087 
3088 	if (entry < nr_pages)
3089 		return 0;
3090 
3091 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3092 }
3093 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prefetch_pages);
3094 
3095 /*
3096  * Don't use this API unless you are absolutely, positively certain that KVM
3097  * needs to get a struct page, e.g. to pin the page for firmware DMA.
3098  *
3099  * FIXME: Users of this API likely need to FOLL_PIN the page, not just elevate
3100  *	  its refcount.
3101  */
3102 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write)
3103 {
3104 	struct page *refcounted_page = NULL;
3105 	struct kvm_follow_pfn kfp = {
3106 		.slot = gfn_to_memslot(kvm, gfn),
3107 		.gfn = gfn,
3108 		.flags = write ? FOLL_WRITE : 0,
3109 		.refcounted_page = &refcounted_page,
3110 	};
3111 
3112 	(void)kvm_follow_pfn(&kfp);
3113 	return refcounted_page;
3114 }
3115 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__gfn_to_page);
3116 
3117 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
3118 		   bool writable)
3119 {
3120 	struct kvm_follow_pfn kfp = {
3121 		.slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn),
3122 		.gfn = gfn,
3123 		.flags = writable ? FOLL_WRITE : 0,
3124 		.refcounted_page = &map->pinned_page,
3125 		.pin = true,
3126 	};
3127 
3128 	map->pinned_page = NULL;
3129 	map->page = NULL;
3130 	map->hva = NULL;
3131 	map->gfn = gfn;
3132 	map->writable = writable;
3133 
3134 	map->pfn = kvm_follow_pfn(&kfp);
3135 	if (is_error_noslot_pfn(map->pfn))
3136 		return -EINVAL;
3137 
3138 	if (pfn_valid(map->pfn)) {
3139 		map->page = pfn_to_page(map->pfn);
3140 		map->hva = kmap(map->page);
3141 #ifdef CONFIG_HAS_IOMEM
3142 	} else {
3143 		map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
3144 #endif
3145 	}
3146 
3147 	return map->hva ? 0 : -EFAULT;
3148 }
3149 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_map);
3150 
3151 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
3152 {
3153 	if (!map->hva)
3154 		return;
3155 
3156 	if (map->page)
3157 		kunmap(map->page);
3158 #ifdef CONFIG_HAS_IOMEM
3159 	else
3160 		memunmap(map->hva);
3161 #endif
3162 
3163 	if (map->writable)
3164 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3165 
3166 	if (map->pinned_page) {
3167 		if (map->writable)
3168 			kvm_set_page_dirty(map->pinned_page);
3169 		kvm_set_page_accessed(map->pinned_page);
3170 		unpin_user_page(map->pinned_page);
3171 	}
3172 
3173 	map->hva = NULL;
3174 	map->page = NULL;
3175 	map->pinned_page = NULL;
3176 }
3177 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_unmap);
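/*
 * Editor's note: a hedged sketch (not kernel code) of the expected pairing of
 * __kvm_vcpu_map() and kvm_vcpu_unmap().  The mapping pins the page, so it
 * should be short-lived.
 */
#if 0	/* illustrative only */
	struct kvm_host_map map;

	if (__kvm_vcpu_map(vcpu, gfn, &map, true))
		return -EFAULT;

	memset(map.hva, 0, PAGE_SIZE);	/* access the guest page via its HVA */
	kvm_vcpu_unmap(vcpu, &map);	/* also marks the gfn dirty */
#endif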
3178 
3179 static int next_segment(unsigned long len, int offset)
3180 {
3181 	if (len > PAGE_SIZE - offset)
3182 		return PAGE_SIZE - offset;
3183 	else
3184 		return len;
3185 }
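/*
 * Editor's example (illustrative, not kernel code): next_segment() clamps
 * each copy to the end of the current page.  A 5000-byte access starting at
 * offset 3000 within a 4096-byte page splits into segments of 1096 and 3904.
 */
#if 0	/* illustrative only */
	unsigned long len = 5000;
	int offset = 3000;
	int seg;

	while ((seg = next_segment(len, offset)) != 0) {
		/* 1st iteration: seg == 1096; 2nd iteration: seg == 3904. */
		len -= seg;
		offset = 0;	/* subsequent pages start at offset 0 */
	}
#endif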
3186 
3187 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3188 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3189 				 void *data, int offset, int len)
3190 {
3191 	int r;
3192 	unsigned long addr;
3193 
3194 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3195 		return -EFAULT;
3196 
3197 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3198 	if (kvm_is_error_hva(addr))
3199 		return -EFAULT;
3200 	r = __copy_from_user(data, (void __user *)addr + offset, len);
3201 	if (r)
3202 		return -EFAULT;
3203 	return 0;
3204 }
3205 
3206 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3207 			int len)
3208 {
3209 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3210 
3211 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3212 }
3213 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_page);
3214 
3215 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3216 			     int offset, int len)
3217 {
3218 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3219 
3220 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3221 }
3222 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_page);
3223 
3224 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3225 {
3226 	gfn_t gfn = gpa >> PAGE_SHIFT;
3227 	int seg;
3228 	int offset = offset_in_page(gpa);
3229 	int ret;
3230 
3231 	while ((seg = next_segment(len, offset)) != 0) {
3232 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3233 		if (ret < 0)
3234 			return ret;
3235 		offset = 0;
3236 		len -= seg;
3237 		data += seg;
3238 		++gfn;
3239 	}
3240 	return 0;
3241 }
3242 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest);
3243 
3244 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3245 {
3246 	gfn_t gfn = gpa >> PAGE_SHIFT;
3247 	int seg;
3248 	int offset = offset_in_page(gpa);
3249 	int ret;
3250 
3251 	while ((seg = next_segment(len, offset)) != 0) {
3252 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3253 		if (ret < 0)
3254 			return ret;
3255 		offset = 0;
3256 		len -= seg;
3257 		data += seg;
3258 		++gfn;
3259 	}
3260 	return 0;
3261 }
3262 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest);
3263 
3264 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3265 			           void *data, int offset, unsigned long len)
3266 {
3267 	int r;
3268 	unsigned long addr;
3269 
3270 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3271 		return -EFAULT;
3272 
3273 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3274 	if (kvm_is_error_hva(addr))
3275 		return -EFAULT;
3276 	pagefault_disable();
3277 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3278 	pagefault_enable();
3279 	if (r)
3280 		return -EFAULT;
3281 	return 0;
3282 }
3283 
3284 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3285 			       void *data, unsigned long len)
3286 {
3287 	gfn_t gfn = gpa >> PAGE_SHIFT;
3288 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3289 	int offset = offset_in_page(gpa);
3290 
3291 	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3292 }
3293 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_atomic);
3294 
3295 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3296 static int __kvm_write_guest_page(struct kvm *kvm,
3297 				  struct kvm_memory_slot *memslot, gfn_t gfn,
3298 			          const void *data, int offset, int len)
3299 {
3300 	int r;
3301 	unsigned long addr;
3302 
3303 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3304 		return -EFAULT;
3305 
3306 	addr = gfn_to_hva_memslot(memslot, gfn);
3307 	if (kvm_is_error_hva(addr))
3308 		return -EFAULT;
3309 	r = __copy_to_user((void __user *)addr + offset, data, len);
3310 	if (r)
3311 		return -EFAULT;
3312 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3313 	return 0;
3314 }
3315 
3316 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3317 			 const void *data, int offset, int len)
3318 {
3319 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3320 
3321 	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3322 }
3323 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_page);
3324 
3325 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3326 			      const void *data, int offset, int len)
3327 {
3328 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3329 
3330 	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3331 }
3332 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest_page);
3333 
3334 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3335 		    unsigned long len)
3336 {
3337 	gfn_t gfn = gpa >> PAGE_SHIFT;
3338 	int seg;
3339 	int offset = offset_in_page(gpa);
3340 	int ret;
3341 
3342 	while ((seg = next_segment(len, offset)) != 0) {
3343 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3344 		if (ret < 0)
3345 			return ret;
3346 		offset = 0;
3347 		len -= seg;
3348 		data += seg;
3349 		++gfn;
3350 	}
3351 	return 0;
3352 }
3353 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest);
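/*
 * Illustrative sketch (not part of the original source): kvm_write_guest()
 * and kvm_read_guest() take a guest physical address and transparently split
 * the copy across page boundaries; "gpa" is a placeholder.
 *
 *	u64 val = 0x1234;
 *
 *	if (kvm_write_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;		// pages touched are marked dirty on success
 */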
3354 
3355 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3356 		         unsigned long len)
3357 {
3358 	gfn_t gfn = gpa >> PAGE_SHIFT;
3359 	int seg;
3360 	int offset = offset_in_page(gpa);
3361 	int ret;
3362 
3363 	while ((seg = next_segment(len, offset)) != 0) {
3364 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3365 		if (ret < 0)
3366 			return ret;
3367 		offset = 0;
3368 		len -= seg;
3369 		data += seg;
3370 		++gfn;
3371 	}
3372 	return 0;
3373 }
3374 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest);
3375 
3376 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3377 				       struct gfn_to_hva_cache *ghc,
3378 				       gpa_t gpa, unsigned long len)
3379 {
3380 	int offset = offset_in_page(gpa);
3381 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
3382 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3383 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3384 	gfn_t nr_pages_avail;
3385 
3386 	/* Update ghc->generation before performing any error checks. */
3387 	ghc->generation = slots->generation;
3388 
3389 	if (start_gfn > end_gfn) {
3390 		ghc->hva = KVM_HVA_ERR_BAD;
3391 		return -EINVAL;
3392 	}
3393 
3394 	/*
3395 	 * If the requested region crosses two memslots, we still
3396 	 * verify that the entire region is valid here.
3397 	 */
3398 	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3399 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3400 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3401 					   &nr_pages_avail);
3402 		if (kvm_is_error_hva(ghc->hva))
3403 			return -EFAULT;
3404 	}
3405 
3406 	/* Use the slow path for cross page reads and writes. */
3407 	if (nr_pages_needed == 1)
3408 		ghc->hva += offset;
3409 	else
3410 		ghc->memslot = NULL;
3411 
3412 	ghc->gpa = gpa;
3413 	ghc->len = len;
3414 	return 0;
3415 }
3416 
3417 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3418 			      gpa_t gpa, unsigned long len)
3419 {
3420 	struct kvm_memslots *slots = kvm_memslots(kvm);
3421 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3422 }
3423 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gfn_to_hva_cache_init);
3424 
3425 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3426 				  void *data, unsigned int offset,
3427 				  unsigned long len)
3428 {
3429 	struct kvm_memslots *slots = kvm_memslots(kvm);
3430 	int r;
3431 	gpa_t gpa = ghc->gpa + offset;
3432 
3433 	if (WARN_ON_ONCE(len + offset > ghc->len))
3434 		return -EINVAL;
3435 
3436 	if (slots->generation != ghc->generation) {
3437 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3438 			return -EFAULT;
3439 	}
3440 
3441 	if (kvm_is_error_hva(ghc->hva))
3442 		return -EFAULT;
3443 
3444 	if (unlikely(!ghc->memslot))
3445 		return kvm_write_guest(kvm, gpa, data, len);
3446 
3447 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3448 	if (r)
3449 		return -EFAULT;
3450 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3451 
3452 	return 0;
3453 }
3454 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_offset_cached);
3455 
3456 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3457 			   void *data, unsigned long len)
3458 {
3459 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3460 }
3461 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_cached);
3462 
3463 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3464 				 void *data, unsigned int offset,
3465 				 unsigned long len)
3466 {
3467 	struct kvm_memslots *slots = kvm_memslots(kvm);
3468 	int r;
3469 	gpa_t gpa = ghc->gpa + offset;
3470 
3471 	if (WARN_ON_ONCE(len + offset > ghc->len))
3472 		return -EINVAL;
3473 
3474 	if (slots->generation != ghc->generation) {
3475 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3476 			return -EFAULT;
3477 	}
3478 
3479 	if (kvm_is_error_hva(ghc->hva))
3480 		return -EFAULT;
3481 
3482 	if (unlikely(!ghc->memslot))
3483 		return kvm_read_guest(kvm, gpa, data, len);
3484 
3485 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3486 	if (r)
3487 		return -EFAULT;
3488 
3489 	return 0;
3490 }
3491 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_offset_cached);
3492 
3493 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3494 			  void *data, unsigned long len)
3495 {
3496 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3497 }
3498 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_cached);
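/*
 * Illustrative sketch (not part of the original source): the gfn_to_hva_cache
 * helpers are meant for a guest location that is accessed repeatedly.  The
 * cache is initialized once; the cached variants re-validate it automatically
 * when the memslot generation changes.  "gpa" and "data" are placeholders.
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(data)))
 *		return -EFAULT;
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &data, sizeof(data));
 */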
3499 
3500 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3501 {
3502 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3503 	gfn_t gfn = gpa >> PAGE_SHIFT;
3504 	int seg;
3505 	int offset = offset_in_page(gpa);
3506 	int ret;
3507 
3508 	while ((seg = next_segment(len, offset)) != 0) {
3509 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3510 		if (ret < 0)
3511 			return ret;
3512 		offset = 0;
3513 		len -= seg;
3514 		++gfn;
3515 	}
3516 	return 0;
3517 }
3518 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_clear_guest);
3519 
3520 void mark_page_dirty_in_slot(struct kvm *kvm,
3521 			     const struct kvm_memory_slot *memslot,
3522 		 	     gfn_t gfn)
3523 {
3524 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3525 
3526 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3527 	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3528 		return;
3529 
3530 	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3531 #endif
3532 
3533 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3534 		unsigned long rel_gfn = gfn - memslot->base_gfn;
3535 		u32 slot = (memslot->as_id << 16) | memslot->id;
3536 
3537 		if (kvm->dirty_ring_size && vcpu)
3538 			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3539 		else if (memslot->dirty_bitmap)
3540 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
3541 	}
3542 }
3543 EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty_in_slot);
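/*
 * Worked example (illustrative): for a memslot with as_id == 1 and id == 5,
 * the dirty-tracking "slot" value is (1 << 16) | 5 == 0x10005, and a write to
 * gfn == base_gfn + 42 is recorded with rel_gfn == 42 in the ring or bitmap.
 */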
3544 
3545 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3546 {
3547 	struct kvm_memory_slot *memslot;
3548 
3549 	memslot = gfn_to_memslot(kvm, gfn);
3550 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3551 }
3552 EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty);
3553 
3554 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3555 {
3556 	struct kvm_memory_slot *memslot;
3557 
3558 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3559 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3560 }
3561 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_mark_page_dirty);
3562 
3563 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3564 {
3565 	if (!vcpu->sigset_active)
3566 		return;
3567 
3568 	/*
3569 	 * This does a lockless modification of ->real_blocked, which is fine
3570 	 * because only current can change ->real_blocked, and all readers of
3571 	 * ->real_blocked don't care as long as ->real_blocked is always a subset
3572 	 * of ->blocked.
3573 	 */
3574 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3575 }
3576 
3577 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3578 {
3579 	if (!vcpu->sigset_active)
3580 		return;
3581 
3582 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3583 	sigemptyset(&current->real_blocked);
3584 }
3585 
3586 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3587 {
3588 	unsigned int old, val, grow, grow_start;
3589 
3590 	old = val = vcpu->halt_poll_ns;
3591 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3592 	grow = READ_ONCE(halt_poll_ns_grow);
3593 	if (!grow)
3594 		goto out;
3595 
3596 	val *= grow;
3597 	if (val < grow_start)
3598 		val = grow_start;
3599 
3600 	vcpu->halt_poll_ns = val;
3601 out:
3602 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3603 }
3604 
3605 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3606 {
3607 	unsigned int old, val, shrink, grow_start;
3608 
3609 	old = val = vcpu->halt_poll_ns;
3610 	shrink = READ_ONCE(halt_poll_ns_shrink);
3611 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3612 	if (shrink == 0)
3613 		val = 0;
3614 	else
3615 		val /= shrink;
3616 
3617 	if (val < grow_start)
3618 		val = 0;
3619 
3620 	vcpu->halt_poll_ns = val;
3621 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3622 }
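/*
 * Worked example (illustrative, module parameter defaults: grow == 2,
 * grow_start == 10000, shrink == 2): growing from halt_poll_ns == 0 yields
 * 10000ns (clamped up to grow_start), then 20000, 40000, ...; shrinking from
 * 40000 yields 20000, then 10000, and shrinking 10000 gives 5000, which is
 * below grow_start and therefore snaps back to 0.
 */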
3623 
3624 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3625 {
3626 	int ret = -EINTR;
3627 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3628 
3629 	if (kvm_arch_vcpu_runnable(vcpu))
3630 		goto out;
3631 	if (kvm_cpu_has_pending_timer(vcpu))
3632 		goto out;
3633 	if (signal_pending(current))
3634 		goto out;
3635 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3636 		goto out;
3637 
3638 	ret = 0;
3639 out:
3640 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3641 	return ret;
3642 }
3643 
3644 /*
3645  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3646  * pending.  This is mostly used when halting a vCPU, but may also be used
3647  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3648  */
3649 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3650 {
3651 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3652 	bool waited = false;
3653 
3654 	vcpu->stat.generic.blocking = 1;
3655 
3656 	preempt_disable();
3657 	kvm_arch_vcpu_blocking(vcpu);
3658 	prepare_to_rcuwait(wait);
3659 	preempt_enable();
3660 
3661 	for (;;) {
3662 		set_current_state(TASK_INTERRUPTIBLE);
3663 
3664 		if (kvm_vcpu_check_block(vcpu) < 0)
3665 			break;
3666 
3667 		waited = true;
3668 		schedule();
3669 	}
3670 
3671 	preempt_disable();
3672 	finish_rcuwait(wait);
3673 	kvm_arch_vcpu_unblocking(vcpu);
3674 	preempt_enable();
3675 
3676 	vcpu->stat.generic.blocking = 0;
3677 
3678 	return waited;
3679 }
3680 
3681 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3682 					  ktime_t end, bool success)
3683 {
3684 	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3685 	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3686 
3687 	++vcpu->stat.generic.halt_attempted_poll;
3688 
3689 	if (success) {
3690 		++vcpu->stat.generic.halt_successful_poll;
3691 
3692 		if (!vcpu_valid_wakeup(vcpu))
3693 			++vcpu->stat.generic.halt_poll_invalid;
3694 
3695 		stats->halt_poll_success_ns += poll_ns;
3696 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3697 	} else {
3698 		stats->halt_poll_fail_ns += poll_ns;
3699 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3700 	}
3701 }
3702 
3703 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3704 {
3705 	struct kvm *kvm = vcpu->kvm;
3706 
3707 	if (kvm->override_halt_poll_ns) {
3708 		/*
3709 		 * Ensure kvm->max_halt_poll_ns is not read before
3710 		 * kvm->override_halt_poll_ns.
3711 		 *
3712 		 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3713 		 */
3714 		smp_rmb();
3715 		return READ_ONCE(kvm->max_halt_poll_ns);
3716 	}
3717 
3718 	return READ_ONCE(halt_poll_ns);
3719 }
3720 
3721 /*
3722  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3723  * polling is enabled, busy wait for a short time before blocking to avoid the
3724  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3725  * is halted.
3726  */
3727 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3728 {
3729 	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3730 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3731 	ktime_t start, cur, poll_end;
3732 	bool waited = false;
3733 	bool do_halt_poll;
3734 	u64 halt_ns;
3735 
3736 	if (vcpu->halt_poll_ns > max_halt_poll_ns)
3737 		vcpu->halt_poll_ns = max_halt_poll_ns;
3738 
3739 	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3740 
3741 	start = cur = poll_end = ktime_get();
3742 	if (do_halt_poll) {
3743 		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3744 
3745 		do {
3746 			if (kvm_vcpu_check_block(vcpu) < 0)
3747 				goto out;
3748 			cpu_relax();
3749 			poll_end = cur = ktime_get();
3750 		} while (kvm_vcpu_can_poll(cur, stop));
3751 	}
3752 
3753 	waited = kvm_vcpu_block(vcpu);
3754 
3755 	cur = ktime_get();
3756 	if (waited) {
3757 		vcpu->stat.generic.halt_wait_ns +=
3758 			ktime_to_ns(cur) - ktime_to_ns(poll_end);
3759 		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3760 				ktime_to_ns(cur) - ktime_to_ns(poll_end));
3761 	}
3762 out:
3763 	/* The total time the vCPU was "halted", including polling time. */
3764 	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3765 
3766 	/*
3767 	 * Note, halt-polling is considered successful so long as the vCPU was
3768 	 * never actually scheduled out, i.e. even if the wake event arrived
3769 	 * after the end of the halt-polling loop itself, but before the full wait.
3770 	 */
3771 	if (do_halt_poll)
3772 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
3773 
3774 	if (halt_poll_allowed) {
3775 		/* Recompute the max halt poll time in case it changed. */
3776 		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3777 
3778 		if (!vcpu_valid_wakeup(vcpu)) {
3779 			shrink_halt_poll_ns(vcpu);
3780 		} else if (max_halt_poll_ns) {
3781 			if (halt_ns <= vcpu->halt_poll_ns)
3782 				;
3783 			/* we had a long block, shrink polling */
3784 			else if (vcpu->halt_poll_ns &&
3785 				 halt_ns > max_halt_poll_ns)
3786 				shrink_halt_poll_ns(vcpu);
3787 			/* we had a short halt and our poll time is too small */
3788 			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3789 				 halt_ns < max_halt_poll_ns)
3790 				grow_halt_poll_ns(vcpu);
3791 		} else {
3792 			vcpu->halt_poll_ns = 0;
3793 		}
3794 	}
3795 
3796 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3797 }
3798 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);
3799 
3800 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3801 {
3802 	if (__kvm_vcpu_wake_up(vcpu)) {
3803 		WRITE_ONCE(vcpu->ready, true);
3804 		++vcpu->stat.generic.halt_wakeup;
3805 		return true;
3806 	}
3807 
3808 	return false;
3809 }
3810 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_wake_up);
3811 
3812 #ifndef CONFIG_S390
3813 /*
3814  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3815  */
3816 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
3817 {
3818 	int me, cpu;
3819 
3820 	if (kvm_vcpu_wake_up(vcpu))
3821 		return;
3822 
3823 	me = get_cpu();
3824 	/*
3825 	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3826 	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3827 	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3828 	 * within the vCPU thread itself.
3829 	 */
3830 	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3831 		if (vcpu->mode == IN_GUEST_MODE)
3832 			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3833 		goto out;
3834 	}
3835 
3836 	/*
3837 	 * Note, the vCPU could get migrated to a different pCPU at any point
3838 	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3839 	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3840 	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3841 	 * vCPU also requires it to leave IN_GUEST_MODE.
3842 	 */
3843 	if (kvm_arch_vcpu_should_kick(vcpu)) {
3844 		cpu = READ_ONCE(vcpu->cpu);
3845 		if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
3846 			/*
3847 			 * Use a reschedule IPI to kick the vCPU if the caller
3848 			 * doesn't need to wait for a response, as KVM allows
3849 			 * kicking vCPUs while IRQs are disabled, but using the
3850 			 * SMP function call framework with IRQs disabled can
3851 			 * deadlock due to taking cross-CPU locks.
3852 			 */
3853 			if (wait)
3854 				smp_call_function_single(cpu, ack_kick, NULL, wait);
3855 			else
3856 				smp_send_reschedule(cpu);
3857 		}
3858 	}
3859 out:
3860 	put_cpu();
3861 }
3862 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_kick);
3863 #endif /* !CONFIG_S390 */
3864 
3865 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3866 {
3867 	struct task_struct *task = NULL;
3868 	int ret;
3869 
3870 	if (!read_trylock(&target->pid_lock))
3871 		return 0;
3872 
3873 	if (target->pid)
3874 		task = get_pid_task(target->pid, PIDTYPE_PID);
3875 
3876 	read_unlock(&target->pid_lock);
3877 
3878 	if (!task)
3879 		return 0;
3880 	ret = yield_to(task, 1);
3881 	put_task_struct(task);
3882 
3883 	return ret;
3884 }
3885 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_yield_to);
3886 
3887 /*
3888  * Helper that checks whether a VCPU is eligible for directed yield.
3889  * The most eligible candidate to yield to is chosen by the following heuristics:
3890  *
3891  *  (a) A VCPU that has not recently taken a PLE exit or had cpu_relax()
3892  *  intercepted (i.e. a preempted lock holder), indicated by @in_spin_loop,
3893  *  which is set at the beginning and cleared at the end of the PLE handler.
3894  *
3895  *  (b) A VCPU that has taken a PLE exit / cpu_relax() intercept but did not
3896  *  get a chance last time (it has most likely become eligible now, since we
3897  *  probably yielded to the lock holder in the last iteration).  This is done
3898  *  by toggling @dy_eligible each time a VCPU is checked for eligibility.
3899  *
3900  *  Yielding to a VCPU that recently took a PLE exit / cpu_relax() intercept,
3901  *  before yielding to a preempted lock holder, could result in selecting the
3902  *  wrong VCPU and burning CPU time.  Prioritizing a potential lock holder
3903  *  improves lock progress.
3904  *
3905  *  Since the algorithm is based on heuristics, accessing another VCPU's data
3906  *  without locking does no harm.  At worst it tries to yield to the same
3907  *  VCPU, fails, and moves on to the next VCPU.
3908  */
3909 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3910 {
3911 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3912 	bool eligible;
3913 
3914 	eligible = !vcpu->spin_loop.in_spin_loop ||
3915 		    vcpu->spin_loop.dy_eligible;
3916 
3917 	if (vcpu->spin_loop.in_spin_loop)
3918 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3919 
3920 	return eligible;
3921 #else
3922 	return true;
3923 #endif
3924 }
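/*
 * Worked example (illustrative): a target vCPU that is itself spinning
 * (in_spin_loop == true) starts with dy_eligible == false, so the first
 * eligibility check skips it and toggles dy_eligible to true; the next check
 * selects it and toggles dy_eligible back to false, alternating between
 * rounds so no spinning vCPU is permanently starved of directed yields.
 */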
3925 
3926 /*
3927  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3928  * a vcpu_load/vcpu_put pair.  However, for most architectures
3929  * kvm_arch_vcpu_runnable does not require vcpu_load.
3930  */
3931 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3932 {
3933 	return kvm_arch_vcpu_runnable(vcpu);
3934 }
3935 
3936 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3937 {
3938 	if (kvm_arch_dy_runnable(vcpu))
3939 		return true;
3940 
3941 #ifdef CONFIG_KVM_ASYNC_PF
3942 	if (!list_empty_careful(&vcpu->async_pf.done))
3943 		return true;
3944 #endif
3945 
3946 	return false;
3947 }
3948 
3949 /*
3950  * By default, simply query the target vCPU's current mode when checking if a
3951  * vCPU was preempted in kernel mode.  All architectures except x86 (or more
3952  * specifically, except VMX) allow querying whether or not a vCPU is in kernel
3953  * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
3954  * directly for cross-vCPU checks is functionally correct and accurate.
3955  */
3956 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
3957 {
3958 	return kvm_arch_vcpu_in_kernel(vcpu);
3959 }
3960 
3961 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3962 {
3963 	return false;
3964 }
3965 
3966 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3967 {
3968 	int nr_vcpus, start, i, idx, yielded;
3969 	struct kvm *kvm = me->kvm;
3970 	struct kvm_vcpu *vcpu;
3971 	int try = 3;
3972 
3973 	nr_vcpus = atomic_read(&kvm->online_vcpus);
3974 	if (nr_vcpus < 2)
3975 		return;
3976 
3977 	/* Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(). */
3978 	smp_rmb();
3979 
3980 	kvm_vcpu_set_in_spin_loop(me, true);
3981 
3982 	/*
3983 	 * The current vCPU ("me") is spinning in kernel mode, i.e. is likely
3984 	 * waiting for a resource to become available.  Attempt to yield to a
3985 	 * vCPU that is runnable, but not currently running, e.g. because the
3986 	 * vCPU was preempted by a higher priority task.  With luck, the vCPU
3987 	 * that was preempted is holding a lock or some other resource that the
3988 	 * current vCPU is waiting to acquire, and yielding to the other vCPU
3989 	 * will allow it to make forward progress and release the lock (or kick
3990 	 * the spinning vCPU, etc).
3991 	 *
3992 	 * Since KVM has no insight into what exactly the guest is doing,
3993 	 * approximate a round-robin selection by iterating over all vCPUs,
3994 	 * starting at the last boosted vCPU.  I.e. if N=kvm->last_boosted_vcpu,
3995 	 * iterate over vCPU[N+1]..vCPU[N-1], wrapping as needed.
3996 	 *
3997 	 * Note, this is inherently racy, e.g. if multiple vCPUs are spinning,
3998 	 * they may all try to yield to the same vCPU(s).  But as above, this
3999 	 * is all best effort due to KVM's lack of visibility into the guest.
4000 	 */
4001 	start = READ_ONCE(kvm->last_boosted_vcpu) + 1;
4002 	for (i = 0; i < nr_vcpus; i++) {
4003 		idx = (start + i) % nr_vcpus;
4004 		if (idx == me->vcpu_idx)
4005 			continue;
4006 
4007 		vcpu = xa_load(&kvm->vcpu_array, idx);
4008 		if (!READ_ONCE(vcpu->ready))
4009 			continue;
4010 		if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4011 			continue;
4012 
4013 		/*
4014 		 * Treat the target vCPU as being in-kernel if it has a pending
4015 		 * interrupt, as the vCPU trying to yield may be spinning
4016 		 * waiting on IPI delivery, i.e. the target vCPU is in-kernel
4017 		 * for the purposes of directed yield.
4018 		 */
4019 		if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4020 		    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4021 		    !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4022 			continue;
4023 
4024 		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4025 			continue;
4026 
4027 		yielded = kvm_vcpu_yield_to(vcpu);
4028 		if (yielded > 0) {
4029 			WRITE_ONCE(kvm->last_boosted_vcpu, idx);
4030 			break;
4031 		} else if (yielded < 0 && !--try) {
4032 			break;
4033 		}
4034 	}
4035 	kvm_vcpu_set_in_spin_loop(me, false);
4036 
4037 	/* Ensure vcpu is not eligible during next spinloop */
4038 	kvm_vcpu_set_dy_eligible(me, false);
4039 }
4040 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_on_spin);
4041 
4042 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4043 {
4044 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4045 	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4046 	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4047 	     kvm->dirty_ring_size / PAGE_SIZE);
4048 #else
4049 	return false;
4050 #endif
4051 }
4052 
4053 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4054 {
4055 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4056 	struct page *page;
4057 
4058 	if (vmf->pgoff == 0)
4059 		page = virt_to_page(vcpu->run);
4060 #ifdef CONFIG_X86
4061 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4062 		page = virt_to_page(vcpu->arch.pio_data);
4063 #endif
4064 #ifdef CONFIG_KVM_MMIO
4065 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4066 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4067 #endif
4068 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4069 		page = kvm_dirty_ring_get_page(
4070 		    &vcpu->dirty_ring,
4071 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4072 	else
4073 		return kvm_arch_vcpu_fault(vcpu, vmf);
4074 	get_page(page);
4075 	vmf->page = page;
4076 	return 0;
4077 }
4078 
4079 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4080 	.fault = kvm_vcpu_fault,
4081 };
4082 
4083 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4084 {
4085 	struct kvm_vcpu *vcpu = file->private_data;
4086 	unsigned long pages = vma_pages(vma);
4087 
4088 	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4089 	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4090 	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4091 		return -EINVAL;
4092 
4093 	vma->vm_ops = &kvm_vcpu_vm_ops;
4094 	return 0;
4095 }
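/*
 * Illustrative userspace sketch (not part of the original source; assumes
 * <linux/kvm.h>, <sys/ioctl.h> and <sys/mman.h>, and that "kvm_fd" is the
 * open /dev/kvm fd and "vcpu_fd" a vCPU fd): the shared kvm_run structure
 * lives at page offset 0 of the vCPU fd, and dirty-ring pages must be mapped
 * shared and non-executable.
 *
 *	size_t sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */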
4096 
4097 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4098 {
4099 	struct kvm_vcpu *vcpu = filp->private_data;
4100 
4101 	kvm_put_kvm(vcpu->kvm);
4102 	return 0;
4103 }
4104 
4105 static struct file_operations kvm_vcpu_fops = {
4106 	.release        = kvm_vcpu_release,
4107 	.unlocked_ioctl = kvm_vcpu_ioctl,
4108 	.mmap           = kvm_vcpu_mmap,
4109 	.llseek		= noop_llseek,
4110 	KVM_COMPAT(kvm_vcpu_compat_ioctl),
4111 };
4112 
4113 /*
4114  * Allocates an anonymous inode (and fd) for the vcpu.
4115  */
4116 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4117 {
4118 	char name[8 + 1 + ITOA_MAX_LEN + 1];
4119 
4120 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4121 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4122 }
4123 
4124 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4125 static int vcpu_get_pid(void *data, u64 *val)
4126 {
4127 	struct kvm_vcpu *vcpu = data;
4128 
4129 	read_lock(&vcpu->pid_lock);
4130 	*val = pid_nr(vcpu->pid);
4131 	read_unlock(&vcpu->pid_lock);
4132 	return 0;
4133 }
4134 
4135 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4136 
4137 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4138 {
4139 	struct dentry *debugfs_dentry;
4140 	char dir_name[ITOA_MAX_LEN * 2];
4141 
4142 	if (!debugfs_initialized())
4143 		return;
4144 
4145 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4146 	debugfs_dentry = debugfs_create_dir(dir_name,
4147 					    vcpu->kvm->debugfs_dentry);
4148 	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4149 			    &vcpu_get_pid_fops);
4150 
4151 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4152 }
4153 #endif
4154 
4155 /*
4156  * Creates some virtual cpus.  Good luck creating more than one.
4157  */
4158 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
4159 {
4160 	int r;
4161 	struct kvm_vcpu *vcpu;
4162 	struct page *page;
4163 
4164 	/*
4165 	 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
4166 	 * too-large values instead of silently truncating.
4167 	 *
4168 	 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
4169 	 * changing the storage type (at the very least, IDs should be tracked
4170 	 * as unsigned ints).
4171 	 */
4172 	BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
4173 	if (id >= KVM_MAX_VCPU_IDS)
4174 		return -EINVAL;
4175 
4176 	mutex_lock(&kvm->lock);
4177 	if (kvm->created_vcpus >= kvm->max_vcpus) {
4178 		mutex_unlock(&kvm->lock);
4179 		return -EINVAL;
4180 	}
4181 
4182 	r = kvm_arch_vcpu_precreate(kvm, id);
4183 	if (r) {
4184 		mutex_unlock(&kvm->lock);
4185 		return r;
4186 	}
4187 
4188 	kvm->created_vcpus++;
4189 	mutex_unlock(&kvm->lock);
4190 
4191 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4192 	if (!vcpu) {
4193 		r = -ENOMEM;
4194 		goto vcpu_decrement;
4195 	}
4196 
4197 	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4198 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4199 	if (!page) {
4200 		r = -ENOMEM;
4201 		goto vcpu_free;
4202 	}
4203 	vcpu->run = page_address(page);
4204 
4205 	kvm_vcpu_init(vcpu, kvm, id);
4206 
4207 	r = kvm_arch_vcpu_create(vcpu);
4208 	if (r)
4209 		goto vcpu_free_run_page;
4210 
4211 	if (kvm->dirty_ring_size) {
4212 		r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring,
4213 					 id, kvm->dirty_ring_size);
4214 		if (r)
4215 			goto arch_vcpu_destroy;
4216 	}
4217 
4218 	mutex_lock(&kvm->lock);
4219 
4220 	if (kvm_get_vcpu_by_id(kvm, id)) {
4221 		r = -EEXIST;
4222 		goto unlock_vcpu_destroy;
4223 	}
4224 
4225 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4226 	r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
4227 	WARN_ON_ONCE(r == -EBUSY);
4228 	if (r)
4229 		goto unlock_vcpu_destroy;
4230 
4231 	/*
4232 	 * Now it's all set up, let userspace reach it.  Grab the vCPU's mutex
4233 	 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
4234 	 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
4235 	 * into a NULL-pointer dereference because KVM thinks the _current_
4236 	 * vCPU doesn't exist.  As a bonus, taking vcpu->mutex ensures lockdep
4237 	 * knows it's taken *inside* kvm->lock.
4238 	 */
4239 	mutex_lock(&vcpu->mutex);
4240 	kvm_get_kvm(kvm);
4241 	r = create_vcpu_fd(vcpu);
4242 	if (r < 0)
4243 		goto kvm_put_xa_erase;
4244 
4245 	/*
4246 	 * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu pointer
4247 	 * before kvm->online_vcpus is incremented.
4248 	 */
4249 	smp_wmb();
4250 	atomic_inc(&kvm->online_vcpus);
4251 	mutex_unlock(&vcpu->mutex);
4252 
4253 	mutex_unlock(&kvm->lock);
4254 	kvm_arch_vcpu_postcreate(vcpu);
4255 	kvm_create_vcpu_debugfs(vcpu);
4256 	return r;
4257 
4258 kvm_put_xa_erase:
4259 	mutex_unlock(&vcpu->mutex);
4260 	kvm_put_kvm_no_destroy(kvm);
4261 	xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
4262 unlock_vcpu_destroy:
4263 	mutex_unlock(&kvm->lock);
4264 	kvm_dirty_ring_free(&vcpu->dirty_ring);
4265 arch_vcpu_destroy:
4266 	kvm_arch_vcpu_destroy(vcpu);
4267 vcpu_free_run_page:
4268 	free_page((unsigned long)vcpu->run);
4269 vcpu_free:
4270 	kmem_cache_free(kvm_vcpu_cache, vcpu);
4271 vcpu_decrement:
4272 	mutex_lock(&kvm->lock);
4273 	kvm->created_vcpus--;
4274 	mutex_unlock(&kvm->lock);
4275 	return r;
4276 }
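/*
 * Illustrative userspace sketch (not part of the original source): each call
 * to KVM_CREATE_VCPU on the VM fd with a distinct id below KVM_MAX_VCPU_IDS
 * returns a new vCPU fd; reusing an id fails (EEXIST) per the check above.
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 */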
4277 
4278 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4279 {
4280 	if (sigset) {
4281 		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4282 		vcpu->sigset_active = 1;
4283 		vcpu->sigset = *sigset;
4284 	} else
4285 		vcpu->sigset_active = 0;
4286 	return 0;
4287 }
4288 
4289 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4290 			      size_t size, loff_t *offset)
4291 {
4292 	struct kvm_vcpu *vcpu = file->private_data;
4293 
4294 	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4295 			&kvm_vcpu_stats_desc[0], &vcpu->stat,
4296 			sizeof(vcpu->stat), user_buffer, size, offset);
4297 }
4298 
4299 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4300 {
4301 	struct kvm_vcpu *vcpu = file->private_data;
4302 
4303 	kvm_put_kvm(vcpu->kvm);
4304 	return 0;
4305 }
4306 
4307 static const struct file_operations kvm_vcpu_stats_fops = {
4308 	.owner = THIS_MODULE,
4309 	.read = kvm_vcpu_stats_read,
4310 	.release = kvm_vcpu_stats_release,
4311 	.llseek = noop_llseek,
4312 };
4313 
4314 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4315 {
4316 	int fd;
4317 	struct file *file;
4318 	char name[15 + ITOA_MAX_LEN + 1];
4319 
4320 	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4321 
4322 	fd = get_unused_fd_flags(O_CLOEXEC);
4323 	if (fd < 0)
4324 		return fd;
4325 
4326 	file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu,
4327 					O_RDONLY, FMODE_PREAD);
4328 	if (IS_ERR(file)) {
4329 		put_unused_fd(fd);
4330 		return PTR_ERR(file);
4331 	}
4332 
4333 	kvm_get_kvm(vcpu->kvm);
4334 	fd_install(fd, file);
4335 
4336 	return fd;
4337 }
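/*
 * Illustrative userspace sketch (not part of the original source): the stats
 * fd returned by KVM_GET_STATS_FD is read-only and pread()-able; the binary
 * layout starts with a struct kvm_stats_header followed by the descriptors
 * and the stats data.
 *
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 */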
4338 
4339 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4340 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4341 				     struct kvm_pre_fault_memory *range)
4342 {
4343 	int idx;
4344 	long r;
4345 	u64 full_size;
4346 
4347 	if (range->flags)
4348 		return -EINVAL;
4349 
4350 	if (!PAGE_ALIGNED(range->gpa) ||
4351 	    !PAGE_ALIGNED(range->size) ||
4352 	    range->gpa + range->size <= range->gpa)
4353 		return -EINVAL;
4354 
4355 	vcpu_load(vcpu);
4356 	idx = srcu_read_lock(&vcpu->kvm->srcu);
4357 
4358 	full_size = range->size;
4359 	do {
4360 		if (signal_pending(current)) {
4361 			r = -EINTR;
4362 			break;
4363 		}
4364 
4365 		r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
4366 		if (WARN_ON_ONCE(r == 0 || r == -EIO))
4367 			break;
4368 
4369 		if (r < 0)
4370 			break;
4371 
4372 		range->size -= r;
4373 		range->gpa += r;
4374 		cond_resched();
4375 	} while (range->size);
4376 
4377 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
4378 	vcpu_put(vcpu);
4379 
4380 	/* Return success if at least one page was mapped successfully.  */
4381 	return full_size == range->size ? r : 0;
4382 }
4383 #endif
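/*
 * Illustrative userspace sketch (not part of the original source): the
 * KVM_PRE_FAULT_MEMORY vCPU ioctl takes a page-aligned gpa/size pair with
 * flags == 0; on return the structure reports whatever part of the range was
 * left unprocessed.  The values below are placeholders.
 *
 *	struct kvm_pre_fault_memory range = {
 *		.gpa  = 0x100000,	// page-aligned
 *		.size = 0x200000,	// page-aligned
 *	};
 *
 *	ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range);
 */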
4384 
4385 static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
4386 {
4387 	struct kvm *kvm = vcpu->kvm;
4388 
4389 	/*
4390 	 * In practice, this happy path will always be taken, as a well-behaved
4391 	 * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns.
4392 	 */
4393 	if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus)))
4394 		return 0;
4395 
4396 	/*
4397 	 * Acquire and release the vCPU's mutex to wait for vCPU creation to
4398 	 * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU
4399 	 * is fully online).
4400 	 */
4401 	if (mutex_lock_killable(&vcpu->mutex))
4402 		return -EINTR;
4403 
4404 	mutex_unlock(&vcpu->mutex);
4405 
4406 	if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx)))
4407 		return -EIO;
4408 
4409 	return 0;
4410 }
4411 
4412 static long kvm_vcpu_ioctl(struct file *filp,
4413 			   unsigned int ioctl, unsigned long arg)
4414 {
4415 	struct kvm_vcpu *vcpu = filp->private_data;
4416 	void __user *argp = (void __user *)arg;
4417 	int r;
4418 	struct kvm_fpu *fpu = NULL;
4419 	struct kvm_sregs *kvm_sregs = NULL;
4420 
4421 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4422 		return -EIO;
4423 
4424 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4425 		return -EINVAL;
4426 
4427 	/*
4428 	 * Wait for the vCPU to be online before handling the ioctl(), as KVM
4429 	 * assumes the vCPU is reachable via vcpu_array, i.e. may dereference
4430 	 * a NULL pointer if userspace invokes an ioctl() before KVM is ready.
4431 	 */
4432 	r = kvm_wait_for_vcpu_online(vcpu);
4433 	if (r)
4434 		return r;
4435 
4436 	/*
4437 	 * Let arch code handle select vCPU ioctls without holding vcpu->mutex,
4438 	 * e.g. to support ioctls that can run asynchronous to vCPU execution.
4439 	 */
4440 	r = kvm_arch_vcpu_unlocked_ioctl(filp, ioctl, arg);
4441 	if (r != -ENOIOCTLCMD)
4442 		return r;
4443 
4444 	if (mutex_lock_killable(&vcpu->mutex))
4445 		return -EINTR;
4446 	switch (ioctl) {
4447 	case KVM_RUN: {
4448 		struct pid *oldpid;
4449 		r = -EINVAL;
4450 		if (arg)
4451 			goto out;
4452 
4453 		/*
4454 		 * Note, vcpu->pid is primarily protected by vcpu->mutex. The
4455 		 * dedicated r/w lock allows other tasks, e.g. other vCPUs, to
4456 		 * read vcpu->pid while this vCPU is in KVM_RUN, e.g. to yield
4457 		 * directly to this vCPU
4458 		 */
4459 		oldpid = vcpu->pid;
4460 		if (unlikely(oldpid != task_pid(current))) {
4461 			/* The thread running this VCPU changed. */
4462 			struct pid *newpid;
4463 
4464 			r = kvm_arch_vcpu_run_pid_change(vcpu);
4465 			if (r)
4466 				break;
4467 
4468 			newpid = get_task_pid(current, PIDTYPE_PID);
4469 			write_lock(&vcpu->pid_lock);
4470 			vcpu->pid = newpid;
4471 			write_unlock(&vcpu->pid_lock);
4472 
4473 			put_pid(oldpid);
4474 		}
4475 		vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
4476 		r = kvm_arch_vcpu_ioctl_run(vcpu);
4477 		vcpu->wants_to_run = false;
4478 
4479 		/*
4480 		 * FIXME: Remove this hack once all KVM architectures
4481 		 * support the generic TIF bits, i.e. a dedicated TIF_RSEQ.
4482 		 */
4483 		rseq_virt_userspace_exit();
4484 
4485 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4486 		break;
4487 	}
4488 	case KVM_GET_REGS: {
4489 		struct kvm_regs *kvm_regs;
4490 
4491 		r = -ENOMEM;
4492 		kvm_regs = kzalloc_obj(struct kvm_regs);
4493 		if (!kvm_regs)
4494 			goto out;
4495 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4496 		if (r)
4497 			goto out_free1;
4498 		r = -EFAULT;
4499 		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4500 			goto out_free1;
4501 		r = 0;
4502 out_free1:
4503 		kfree(kvm_regs);
4504 		break;
4505 	}
4506 	case KVM_SET_REGS: {
4507 		struct kvm_regs *kvm_regs;
4508 
4509 		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4510 		if (IS_ERR(kvm_regs)) {
4511 			r = PTR_ERR(kvm_regs);
4512 			goto out;
4513 		}
4514 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4515 		kfree(kvm_regs);
4516 		break;
4517 	}
4518 	case KVM_GET_SREGS: {
4519 		kvm_sregs = kzalloc_obj(struct kvm_sregs);
4520 		r = -ENOMEM;
4521 		if (!kvm_sregs)
4522 			goto out;
4523 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4524 		if (r)
4525 			goto out;
4526 		r = -EFAULT;
4527 		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4528 			goto out;
4529 		r = 0;
4530 		break;
4531 	}
4532 	case KVM_SET_SREGS: {
4533 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4534 		if (IS_ERR(kvm_sregs)) {
4535 			r = PTR_ERR(kvm_sregs);
4536 			kvm_sregs = NULL;
4537 			goto out;
4538 		}
4539 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4540 		break;
4541 	}
4542 	case KVM_GET_MP_STATE: {
4543 		struct kvm_mp_state mp_state;
4544 
4545 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4546 		if (r)
4547 			goto out;
4548 		r = -EFAULT;
4549 		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4550 			goto out;
4551 		r = 0;
4552 		break;
4553 	}
4554 	case KVM_SET_MP_STATE: {
4555 		struct kvm_mp_state mp_state;
4556 
4557 		r = -EFAULT;
4558 		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4559 			goto out;
4560 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4561 		break;
4562 	}
4563 	case KVM_TRANSLATE: {
4564 		struct kvm_translation tr;
4565 
4566 		r = -EFAULT;
4567 		if (copy_from_user(&tr, argp, sizeof(tr)))
4568 			goto out;
4569 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4570 		if (r)
4571 			goto out;
4572 		r = -EFAULT;
4573 		if (copy_to_user(argp, &tr, sizeof(tr)))
4574 			goto out;
4575 		r = 0;
4576 		break;
4577 	}
4578 	case KVM_SET_GUEST_DEBUG: {
4579 		struct kvm_guest_debug dbg;
4580 
4581 		r = -EFAULT;
4582 		if (copy_from_user(&dbg, argp, sizeof(dbg)))
4583 			goto out;
4584 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4585 		break;
4586 	}
4587 	case KVM_SET_SIGNAL_MASK: {
4588 		struct kvm_signal_mask __user *sigmask_arg = argp;
4589 		struct kvm_signal_mask kvm_sigmask;
4590 		sigset_t sigset, *p;
4591 
4592 		p = NULL;
4593 		if (argp) {
4594 			r = -EFAULT;
4595 			if (copy_from_user(&kvm_sigmask, argp,
4596 					   sizeof(kvm_sigmask)))
4597 				goto out;
4598 			r = -EINVAL;
4599 			if (kvm_sigmask.len != sizeof(sigset))
4600 				goto out;
4601 			r = -EFAULT;
4602 			if (copy_from_user(&sigset, sigmask_arg->sigset,
4603 					   sizeof(sigset)))
4604 				goto out;
4605 			p = &sigset;
4606 		}
4607 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4608 		break;
4609 	}
4610 	case KVM_GET_FPU: {
4611 		fpu = kzalloc_obj(struct kvm_fpu);
4612 		r = -ENOMEM;
4613 		if (!fpu)
4614 			goto out;
4615 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4616 		if (r)
4617 			goto out;
4618 		r = -EFAULT;
4619 		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4620 			goto out;
4621 		r = 0;
4622 		break;
4623 	}
4624 	case KVM_SET_FPU: {
4625 		fpu = memdup_user(argp, sizeof(*fpu));
4626 		if (IS_ERR(fpu)) {
4627 			r = PTR_ERR(fpu);
4628 			fpu = NULL;
4629 			goto out;
4630 		}
4631 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4632 		break;
4633 	}
4634 	case KVM_GET_STATS_FD: {
4635 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4636 		break;
4637 	}
4638 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4639 	case KVM_PRE_FAULT_MEMORY: {
4640 		struct kvm_pre_fault_memory range;
4641 
4642 		r = -EFAULT;
4643 		if (copy_from_user(&range, argp, sizeof(range)))
4644 			break;
4645 		r = kvm_vcpu_pre_fault_memory(vcpu, &range);
4646 		/* Pass back leftover range. */
4647 		if (copy_to_user(argp, &range, sizeof(range)))
4648 			r = -EFAULT;
4649 		break;
4650 	}
4651 #endif
4652 	default:
4653 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4654 	}
4655 out:
4656 	mutex_unlock(&vcpu->mutex);
4657 	kfree(fpu);
4658 	kfree(kvm_sregs);
4659 	return r;
4660 }
4661 
4662 #ifdef CONFIG_KVM_COMPAT
4663 static long kvm_vcpu_compat_ioctl(struct file *filp,
4664 				  unsigned int ioctl, unsigned long arg)
4665 {
4666 	struct kvm_vcpu *vcpu = filp->private_data;
4667 	void __user *argp = compat_ptr(arg);
4668 	int r;
4669 
4670 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4671 		return -EIO;
4672 
4673 	switch (ioctl) {
4674 	case KVM_SET_SIGNAL_MASK: {
4675 		struct kvm_signal_mask __user *sigmask_arg = argp;
4676 		struct kvm_signal_mask kvm_sigmask;
4677 		sigset_t sigset;
4678 
4679 		if (argp) {
4680 			r = -EFAULT;
4681 			if (copy_from_user(&kvm_sigmask, argp,
4682 					   sizeof(kvm_sigmask)))
4683 				goto out;
4684 			r = -EINVAL;
4685 			if (kvm_sigmask.len != sizeof(compat_sigset_t))
4686 				goto out;
4687 			r = -EFAULT;
4688 			if (get_compat_sigset(&sigset,
4689 					      (compat_sigset_t __user *)sigmask_arg->sigset))
4690 				goto out;
4691 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4692 		} else
4693 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4694 		break;
4695 	}
4696 	default:
4697 		r = kvm_vcpu_ioctl(filp, ioctl, arg);
4698 	}
4699 
4700 out:
4701 	return r;
4702 }
4703 #endif
4704 
4705 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4706 {
4707 	struct kvm_device *dev = filp->private_data;
4708 
4709 	if (dev->ops->mmap)
4710 		return dev->ops->mmap(dev, vma);
4711 
4712 	return -ENODEV;
4713 }
4714 
4715 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4716 				 int (*accessor)(struct kvm_device *dev,
4717 						 struct kvm_device_attr *attr),
4718 				 unsigned long arg)
4719 {
4720 	struct kvm_device_attr attr;
4721 
4722 	if (!accessor)
4723 		return -EPERM;
4724 
4725 	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4726 		return -EFAULT;
4727 
4728 	return accessor(dev, &attr);
4729 }
4730 
4731 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4732 			     unsigned long arg)
4733 {
4734 	struct kvm_device *dev = filp->private_data;
4735 
4736 	if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4737 		return -EIO;
4738 
4739 	switch (ioctl) {
4740 	case KVM_SET_DEVICE_ATTR:
4741 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4742 	case KVM_GET_DEVICE_ATTR:
4743 		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4744 	case KVM_HAS_DEVICE_ATTR:
4745 		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4746 	default:
4747 		if (dev->ops->ioctl)
4748 			return dev->ops->ioctl(dev, ioctl, arg);
4749 
4750 		return -ENOTTY;
4751 	}
4752 }
4753 
4754 static int kvm_device_release(struct inode *inode, struct file *filp)
4755 {
4756 	struct kvm_device *dev = filp->private_data;
4757 	struct kvm *kvm = dev->kvm;
4758 
4759 	if (dev->ops->release) {
4760 		mutex_lock(&kvm->lock);
4761 		list_del_rcu(&dev->vm_node);
4762 		synchronize_rcu();
4763 		dev->ops->release(dev);
4764 		mutex_unlock(&kvm->lock);
4765 	}
4766 
4767 	kvm_put_kvm(kvm);
4768 	return 0;
4769 }
4770 
4771 static struct file_operations kvm_device_fops = {
4772 	.unlocked_ioctl = kvm_device_ioctl,
4773 	.release = kvm_device_release,
4774 	KVM_COMPAT(kvm_device_ioctl),
4775 	.mmap = kvm_device_mmap,
4776 };
4777 
4778 struct kvm_device *kvm_device_from_filp(struct file *filp)
4779 {
4780 	if (filp->f_op != &kvm_device_fops)
4781 		return NULL;
4782 
4783 	return filp->private_data;
4784 }
4785 
4786 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4787 #ifdef CONFIG_KVM_MPIC
4788 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
4789 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
4790 #endif
4791 };
4792 
4793 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4794 {
4795 	if (type >= ARRAY_SIZE(kvm_device_ops_table))
4796 		return -ENOSPC;
4797 
4798 	if (kvm_device_ops_table[type] != NULL)
4799 		return -EEXIST;
4800 
4801 	kvm_device_ops_table[type] = ops;
4802 	return 0;
4803 }
4804 
4805 void kvm_unregister_device_ops(u32 type)
4806 {
4807 	if (kvm_device_ops_table[type] != NULL)
4808 		kvm_device_ops_table[type] = NULL;
4809 }
4810 
4811 static int kvm_ioctl_create_device(struct kvm *kvm,
4812 				   struct kvm_create_device *cd)
4813 {
4814 	const struct kvm_device_ops *ops;
4815 	struct kvm_device *dev;
4816 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4817 	int type;
4818 	int ret;
4819 
4820 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4821 		return -ENODEV;
4822 
4823 	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4824 	ops = kvm_device_ops_table[type];
4825 	if (ops == NULL)
4826 		return -ENODEV;
4827 
4828 	if (test)
4829 		return 0;
4830 
4831 	dev = kzalloc_obj(*dev, GFP_KERNEL_ACCOUNT);
4832 	if (!dev)
4833 		return -ENOMEM;
4834 
4835 	dev->ops = ops;
4836 	dev->kvm = kvm;
4837 
4838 	mutex_lock(&kvm->lock);
4839 	ret = ops->create(dev, type);
4840 	if (ret < 0) {
4841 		mutex_unlock(&kvm->lock);
4842 		kfree(dev);
4843 		return ret;
4844 	}
4845 	list_add_rcu(&dev->vm_node, &kvm->devices);
4846 	mutex_unlock(&kvm->lock);
4847 
4848 	if (ops->init)
4849 		ops->init(dev);
4850 
4851 	kvm_get_kvm(kvm);
4852 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4853 	if (ret < 0) {
4854 		kvm_put_kvm_no_destroy(kvm);
4855 		mutex_lock(&kvm->lock);
4856 		list_del_rcu(&dev->vm_node);
4857 		synchronize_rcu();
4858 		if (ops->release)
4859 			ops->release(dev);
4860 		mutex_unlock(&kvm->lock);
4861 		if (ops->destroy)
4862 			ops->destroy(dev);
4863 		return ret;
4864 	}
4865 
4866 	cd->fd = ret;
4867 	return 0;
4868 }
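/*
 * Illustrative userspace sketch (not part of the original source): passing
 * KVM_CREATE_DEVICE_TEST only probes whether the device type is supported; a
 * second call without the flag actually creates the device and returns its fd
 * in cd.fd.  KVM_DEV_TYPE_VFIO is just an example type.
 *
 *	struct kvm_create_device cd = {
 *		.type  = KVM_DEV_TYPE_VFIO,
 *		.flags = KVM_CREATE_DEVICE_TEST,
 *	};
 *
 *	if (!ioctl(vm_fd, KVM_CREATE_DEVICE, &cd)) {
 *		cd.flags = 0;
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd holds the device fd
 *	}
 */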
4869 
4870 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4871 {
4872 	switch (arg) {
4873 	case KVM_CAP_SYNC_MMU:
4874 	case KVM_CAP_USER_MEMORY:
4875 	case KVM_CAP_USER_MEMORY2:
4876 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4877 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4878 	case KVM_CAP_INTERNAL_ERROR_DATA:
4879 #ifdef CONFIG_HAVE_KVM_MSI
4880 	case KVM_CAP_SIGNAL_MSI:
4881 #endif
4882 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4883 	case KVM_CAP_IRQFD:
4884 #endif
4885 	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4886 	case KVM_CAP_CHECK_EXTENSION_VM:
4887 	case KVM_CAP_ENABLE_CAP_VM:
4888 	case KVM_CAP_HALT_POLL:
4889 		return 1;
4890 #ifdef CONFIG_KVM_MMIO
4891 	case KVM_CAP_COALESCED_MMIO:
4892 		return KVM_COALESCED_MMIO_PAGE_OFFSET;
4893 	case KVM_CAP_COALESCED_PIO:
4894 		return 1;
4895 #endif
4896 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4897 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4898 		return KVM_DIRTY_LOG_MANUAL_CAPS;
4899 #endif
4900 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4901 	case KVM_CAP_IRQ_ROUTING:
4902 		return KVM_MAX_IRQ_ROUTES;
4903 #endif
4904 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4905 	case KVM_CAP_MULTI_ADDRESS_SPACE:
4906 		if (kvm)
4907 			return kvm_arch_nr_memslot_as_ids(kvm);
4908 		return KVM_MAX_NR_ADDRESS_SPACES;
4909 #endif
4910 	case KVM_CAP_NR_MEMSLOTS:
4911 		return KVM_USER_MEM_SLOTS;
4912 	case KVM_CAP_DIRTY_LOG_RING:
4913 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4914 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4915 #else
4916 		return 0;
4917 #endif
4918 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4919 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4920 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4921 #else
4922 		return 0;
4923 #endif
4924 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4925 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4926 #endif
4927 	case KVM_CAP_BINARY_STATS_FD:
4928 	case KVM_CAP_SYSTEM_EVENT_DATA:
4929 	case KVM_CAP_DEVICE_CTRL:
4930 		return 1;
4931 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4932 	case KVM_CAP_MEMORY_ATTRIBUTES:
4933 		return kvm_supported_mem_attributes(kvm);
4934 #endif
4935 #ifdef CONFIG_KVM_GUEST_MEMFD
4936 	case KVM_CAP_GUEST_MEMFD:
4937 		return 1;
4938 	case KVM_CAP_GUEST_MEMFD_FLAGS:
4939 		return kvm_gmem_get_supported_flags(kvm);
4940 #endif
4941 	default:
4942 		break;
4943 	}
4944 	return kvm_vm_ioctl_check_extension(kvm, arg);
4945 }
4946 
4947 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4948 {
4949 	int r;
4950 
4951 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4952 		return -EINVAL;
4953 
4954 	/* The size must be a power of 2 */
4955 	if (!size || (size & (size - 1)))
4956 		return -EINVAL;
4957 
4958 	/* Must be large enough to hold the reserved entries and be at least one page */
4959 	if (size < kvm_dirty_ring_get_rsvd_entries(kvm) *
4960 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4961 		return -EINVAL;
4962 
4963 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4964 	    sizeof(struct kvm_dirty_gfn))
4965 		return -E2BIG;
4966 
4967 	/* The ring size can only be set once. */
4968 	if (kvm->dirty_ring_size)
4969 		return -EINVAL;
4970 
4971 	mutex_lock(&kvm->lock);
4972 
4973 	if (kvm->created_vcpus) {
4974 		/* The size cannot be changed after vCPUs have been created. */
4975 		r = -EINVAL;
4976 	} else {
4977 		kvm->dirty_ring_size = size;
4978 		r = 0;
4979 	}
4980 
4981 	mutex_unlock(&kvm->lock);
4982 	return r;
4983 }
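
/*
 * Example (editorial sketch): the ring is enabled with KVM_ENABLE_CAP on
 * the VM fd before any vCPU is created; args[0] is the per-vCPU ring size
 * in bytes and must satisfy the checks above (a power of 2, at least one
 * page, room for the reserved entries, and no more than
 * KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn)).  vm_fd is an
 * assumed VM file descriptor; r is 0 on success:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_DIRTY_LOG_RING,
 *		.args[0] = 65536,
 *	};
 *	int r = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */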
4984 
4985 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4986 {
4987 	unsigned long i;
4988 	struct kvm_vcpu *vcpu;
4989 	int cleared = 0, r;
4990 
4991 	if (!kvm->dirty_ring_size)
4992 		return -EINVAL;
4993 
4994 	mutex_lock(&kvm->slots_lock);
4995 
4996 	kvm_for_each_vcpu(i, vcpu, kvm) {
4997 		r = kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring, &cleared);
4998 		if (r)
4999 			break;
5000 	}
5001 
5002 	mutex_unlock(&kvm->slots_lock);
5003 
5004 	if (cleared)
5005 		kvm_flush_remote_tlbs(kvm);
5006 
5007 	return cleared;
5008 }
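
/*
 * Example (editorial sketch): a dirty-ring consumer typically harvests the
 * published kvm_dirty_gfn entries from each vCPU's ring, marks them for
 * reset, and then issues one VM-wide KVM_RESET_DIRTY_RINGS, which returns
 * the number of entries reclaimed, i.e. 'cleared' above:
 *
 *	int reset = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
 */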
5009 
5010 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
5011 						  struct kvm_enable_cap *cap)
5012 {
5013 	return -EINVAL;
5014 }
5015 
5016 bool kvm_are_all_memslots_empty(struct kvm *kvm)
5017 {
5018 	int i;
5019 
5020 	lockdep_assert_held(&kvm->slots_lock);
5021 
5022 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
5023 		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
5024 			return false;
5025 	}
5026 
5027 	return true;
5028 }
5029 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_are_all_memslots_empty);
5030 
5031 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
5032 					   struct kvm_enable_cap *cap)
5033 {
5034 	switch (cap->cap) {
5035 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5036 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
5037 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
5038 
5039 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
5040 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
5041 
5042 		if (cap->flags || (cap->args[0] & ~allowed_options))
5043 			return -EINVAL;
5044 		kvm->manual_dirty_log_protect = cap->args[0];
5045 		return 0;
5046 	}
5047 #endif
5048 	case KVM_CAP_HALT_POLL: {
5049 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5050 			return -EINVAL;
5051 
5052 		kvm->max_halt_poll_ns = cap->args[0];
5053 
5054 		/*
5055 		 * Ensure kvm->override_halt_poll_ns does not become visible
5056 		 * before kvm->max_halt_poll_ns.
5057 		 *
5058 		 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5059 		 */
5060 		smp_wmb();
5061 		kvm->override_halt_poll_ns = true;
5062 
5063 		return 0;
5064 	}
5065 	case KVM_CAP_DIRTY_LOG_RING:
5066 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5067 		if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5068 			return -EINVAL;
5069 
5070 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5071 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5072 		int r = -EINVAL;
5073 
5074 		if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5075 		    !kvm->dirty_ring_size || cap->flags)
5076 			return r;
5077 
5078 		mutex_lock(&kvm->slots_lock);
5079 
5080 		/*
5081 		 * For simplicity, allow enabling ring+bitmap if and only if
5082 		 * there are no memslots, e.g. to ensure all memslots allocate
5083 		 * a bitmap after the capability is enabled.
5084 		 */
5085 		if (kvm_are_all_memslots_empty(kvm)) {
5086 			kvm->dirty_ring_with_bitmap = true;
5087 			r = 0;
5088 		}
5089 
5090 		mutex_unlock(&kvm->slots_lock);
5091 
5092 		return r;
5093 	}
5094 	default:
5095 		return kvm_vm_ioctl_enable_cap(kvm, cap);
5096 	}
5097 }
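
/*
 * Example (editorial sketch): KVM_CAP_HALT_POLL takes the new per-VM
 * maximum halt-polling time in nanoseconds in args[0]; per the checks
 * above, flags must be zero and the value must fit in an unsigned int:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_HALT_POLL,
 *		.args[0] = 200000,
 *	};
 *	int r = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */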
5098 
5099 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5100 			      size_t size, loff_t *offset)
5101 {
5102 	struct kvm *kvm = file->private_data;
5103 
5104 	return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5105 				&kvm_vm_stats_desc[0], &kvm->stat,
5106 				sizeof(kvm->stat), user_buffer, size, offset);
5107 }
5108 
5109 static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5110 {
5111 	struct kvm *kvm = file->private_data;
5112 
5113 	kvm_put_kvm(kvm);
5114 	return 0;
5115 }
5116 
5117 static const struct file_operations kvm_vm_stats_fops = {
5118 	.owner = THIS_MODULE,
5119 	.read = kvm_vm_stats_read,
5120 	.release = kvm_vm_stats_release,
5121 	.llseek = noop_llseek,
5122 };
5123 
5124 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5125 {
5126 	int fd;
5127 	struct file *file;
5128 
5129 	fd = get_unused_fd_flags(O_CLOEXEC);
5130 	if (fd < 0)
5131 		return fd;
5132 
5133 	file = anon_inode_getfile_fmode("kvm-vm-stats",
5134 			&kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD);
5135 	if (IS_ERR(file)) {
5136 		put_unused_fd(fd);
5137 		return PTR_ERR(file);
5138 	}
5139 
5140 	kvm_get_kvm(kvm);
5141 	fd_install(fd, file);
5142 
5143 	return fd;
5144 }
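
/*
 * Example (editorial sketch): the returned fd exposes the binary stats
 * format (header, then descriptors and data at the offsets the header
 * advertises); since the file is created with FMODE_PREAD, a reader can
 * pread() the kvm_stats_header first.  stats_fd below is assumed usage:
 *
 *	#include <unistd.h>
 *	#include <linux/kvm.h>
 *
 *	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, 0);
 *	struct kvm_stats_header hdr;
 *	ssize_t n = pread(stats_fd, &hdr, sizeof(hdr), 0);
 */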
5145 
5146 #define SANITY_CHECK_MEM_REGION_FIELD(field)					\
5147 do {										\
5148 	BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=		\
5149 		     offsetof(struct kvm_userspace_memory_region2, field));	\
5150 	BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) !=		\
5151 		     sizeof_field(struct kvm_userspace_memory_region2, field));	\
5152 } while (0)
5153 
5154 static long kvm_vm_ioctl(struct file *filp,
5155 			   unsigned int ioctl, unsigned long arg)
5156 {
5157 	struct kvm *kvm = filp->private_data;
5158 	void __user *argp = (void __user *)arg;
5159 	int r;
5160 
5161 	if (kvm->mm != current->mm || kvm->vm_dead)
5162 		return -EIO;
5163 	switch (ioctl) {
5164 	case KVM_CREATE_VCPU:
5165 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5166 		break;
5167 	case KVM_ENABLE_CAP: {
5168 		struct kvm_enable_cap cap;
5169 
5170 		r = -EFAULT;
5171 		if (copy_from_user(&cap, argp, sizeof(cap)))
5172 			goto out;
5173 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5174 		break;
5175 	}
5176 	case KVM_SET_USER_MEMORY_REGION2:
5177 	case KVM_SET_USER_MEMORY_REGION: {
5178 		struct kvm_userspace_memory_region2 mem;
5179 		unsigned long size;
5180 
5181 		if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5182 			/*
5183 			 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5184 			 * accessed, but avoid leaking kernel memory in case of a bug.
5185 			 */
5186 			memset(&mem, 0, sizeof(mem));
5187 			size = sizeof(struct kvm_userspace_memory_region);
5188 		} else {
5189 			size = sizeof(struct kvm_userspace_memory_region2);
5190 		}
5191 
5192 		/* Ensure the common parts of the two structs are identical. */
5193 		SANITY_CHECK_MEM_REGION_FIELD(slot);
5194 		SANITY_CHECK_MEM_REGION_FIELD(flags);
5195 		SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5196 		SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5197 		SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5198 
5199 		r = -EFAULT;
5200 		if (copy_from_user(&mem, argp, size))
5201 			goto out;
5202 
5203 		r = -EINVAL;
5204 		if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5205 		    (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5206 			goto out;
5207 
5208 		r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5209 		break;
5210 	}
5211 	case KVM_GET_DIRTY_LOG: {
5212 		struct kvm_dirty_log log;
5213 
5214 		r = -EFAULT;
5215 		if (copy_from_user(&log, argp, sizeof(log)))
5216 			goto out;
5217 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5218 		break;
5219 	}
5220 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5221 	case KVM_CLEAR_DIRTY_LOG: {
5222 		struct kvm_clear_dirty_log log;
5223 
5224 		r = -EFAULT;
5225 		if (copy_from_user(&log, argp, sizeof(log)))
5226 			goto out;
5227 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5228 		break;
5229 	}
5230 #endif
5231 #ifdef CONFIG_KVM_MMIO
5232 	case KVM_REGISTER_COALESCED_MMIO: {
5233 		struct kvm_coalesced_mmio_zone zone;
5234 
5235 		r = -EFAULT;
5236 		if (copy_from_user(&zone, argp, sizeof(zone)))
5237 			goto out;
5238 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5239 		break;
5240 	}
5241 	case KVM_UNREGISTER_COALESCED_MMIO: {
5242 		struct kvm_coalesced_mmio_zone zone;
5243 
5244 		r = -EFAULT;
5245 		if (copy_from_user(&zone, argp, sizeof(zone)))
5246 			goto out;
5247 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5248 		break;
5249 	}
5250 #endif
5251 	case KVM_IRQFD: {
5252 		struct kvm_irqfd data;
5253 
5254 		r = -EFAULT;
5255 		if (copy_from_user(&data, argp, sizeof(data)))
5256 			goto out;
5257 		r = kvm_irqfd(kvm, &data);
5258 		break;
5259 	}
5260 	case KVM_IOEVENTFD: {
5261 		struct kvm_ioeventfd data;
5262 
5263 		r = -EFAULT;
5264 		if (copy_from_user(&data, argp, sizeof(data)))
5265 			goto out;
5266 		r = kvm_ioeventfd(kvm, &data);
5267 		break;
5268 	}
5269 #ifdef CONFIG_HAVE_KVM_MSI
5270 	case KVM_SIGNAL_MSI: {
5271 		struct kvm_msi msi;
5272 
5273 		r = -EFAULT;
5274 		if (copy_from_user(&msi, argp, sizeof(msi)))
5275 			goto out;
5276 		r = kvm_send_userspace_msi(kvm, &msi);
5277 		break;
5278 	}
5279 #endif
5280 #ifdef __KVM_HAVE_IRQ_LINE
5281 	case KVM_IRQ_LINE_STATUS:
5282 	case KVM_IRQ_LINE: {
5283 		struct kvm_irq_level irq_event;
5284 
5285 		r = -EFAULT;
5286 		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5287 			goto out;
5288 
5289 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5290 					ioctl == KVM_IRQ_LINE_STATUS);
5291 		if (r)
5292 			goto out;
5293 
5294 		r = -EFAULT;
5295 		if (ioctl == KVM_IRQ_LINE_STATUS) {
5296 			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5297 				goto out;
5298 		}
5299 
5300 		r = 0;
5301 		break;
5302 	}
5303 #endif
5304 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5305 	case KVM_SET_GSI_ROUTING: {
5306 		struct kvm_irq_routing routing;
5307 		struct kvm_irq_routing __user *urouting;
5308 		struct kvm_irq_routing_entry *entries = NULL;
5309 
5310 		r = -EFAULT;
5311 		if (copy_from_user(&routing, argp, sizeof(routing)))
5312 			goto out;
5313 		r = -EINVAL;
5314 		if (!kvm_arch_can_set_irq_routing(kvm))
5315 			goto out;
5316 		if (routing.nr > KVM_MAX_IRQ_ROUTES)
5317 			goto out;
5318 		if (routing.flags)
5319 			goto out;
5320 		if (routing.nr) {
5321 			urouting = argp;
5322 			entries = vmemdup_array_user(urouting->entries,
5323 						     routing.nr, sizeof(*entries));
5324 			if (IS_ERR(entries)) {
5325 				r = PTR_ERR(entries);
5326 				goto out;
5327 			}
5328 		}
5329 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
5330 					routing.flags);
5331 		kvfree(entries);
5332 		break;
5333 	}
5334 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5335 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5336 	case KVM_SET_MEMORY_ATTRIBUTES: {
5337 		struct kvm_memory_attributes attrs;
5338 
5339 		r = -EFAULT;
5340 		if (copy_from_user(&attrs, argp, sizeof(attrs)))
5341 			goto out;
5342 
5343 		r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5344 		break;
5345 	}
5346 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5347 	case KVM_CREATE_DEVICE: {
5348 		struct kvm_create_device cd;
5349 
5350 		r = -EFAULT;
5351 		if (copy_from_user(&cd, argp, sizeof(cd)))
5352 			goto out;
5353 
5354 		r = kvm_ioctl_create_device(kvm, &cd);
5355 		if (r)
5356 			goto out;
5357 
5358 		r = -EFAULT;
5359 		if (copy_to_user(argp, &cd, sizeof(cd)))
5360 			goto out;
5361 
5362 		r = 0;
5363 		break;
5364 	}
5365 	case KVM_CHECK_EXTENSION:
5366 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5367 		break;
5368 	case KVM_RESET_DIRTY_RINGS:
5369 		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5370 		break;
5371 	case KVM_GET_STATS_FD:
5372 		r = kvm_vm_ioctl_get_stats_fd(kvm);
5373 		break;
5374 #ifdef CONFIG_KVM_GUEST_MEMFD
5375 	case KVM_CREATE_GUEST_MEMFD: {
5376 		struct kvm_create_guest_memfd guest_memfd;
5377 
5378 		r = -EFAULT;
5379 		if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5380 			goto out;
5381 
5382 		r = kvm_gmem_create(kvm, &guest_memfd);
5383 		break;
5384 	}
5385 #endif
5386 	default:
5387 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5388 	}
5389 out:
5390 	return r;
5391 }
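
/*
 * Example (editorial sketch): a minimal KVM_SET_USER_MEMORY_REGION2 call
 * matching the copy-in above; 'backing' is an assumed page-aligned buffer,
 * e.g. from mmap(), and the guest_memfd fields stay zero for an ordinary
 * slot:
 *
 *	struct kvm_userspace_memory_region2 mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size = 0x200000,
 *		.userspace_addr = (__u64)(unsigned long)backing,
 *	};
 *	int r = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &mem);
 */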
5392 
5393 #ifdef CONFIG_KVM_COMPAT
5394 struct compat_kvm_dirty_log {
5395 	__u32 slot;
5396 	__u32 padding1;
5397 	union {
5398 		compat_uptr_t dirty_bitmap; /* one bit per page */
5399 		__u64 padding2;
5400 	};
5401 };
5402 
5403 struct compat_kvm_clear_dirty_log {
5404 	__u32 slot;
5405 	__u32 num_pages;
5406 	__u64 first_page;
5407 	union {
5408 		compat_uptr_t dirty_bitmap; /* one bit per page */
5409 		__u64 padding2;
5410 	};
5411 };
5412 
5413 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5414 				     unsigned long arg)
5415 {
5416 	return -ENOTTY;
5417 }
5418 
5419 static long kvm_vm_compat_ioctl(struct file *filp,
5420 			   unsigned int ioctl, unsigned long arg)
5421 {
5422 	struct kvm *kvm = filp->private_data;
5423 	int r;
5424 
5425 	if (kvm->mm != current->mm || kvm->vm_dead)
5426 		return -EIO;
5427 
5428 	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5429 	if (r != -ENOTTY)
5430 		return r;
5431 
5432 	switch (ioctl) {
5433 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5434 	case KVM_CLEAR_DIRTY_LOG: {
5435 		struct compat_kvm_clear_dirty_log compat_log;
5436 		struct kvm_clear_dirty_log log;
5437 
5438 		if (copy_from_user(&compat_log, (void __user *)arg,
5439 				   sizeof(compat_log)))
5440 			return -EFAULT;
5441 		log.slot	 = compat_log.slot;
5442 		log.num_pages	 = compat_log.num_pages;
5443 		log.first_page	 = compat_log.first_page;
5444 		log.padding2	 = compat_log.padding2;
5445 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5446 
5447 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5448 		break;
5449 	}
5450 #endif
5451 	case KVM_GET_DIRTY_LOG: {
5452 		struct compat_kvm_dirty_log compat_log;
5453 		struct kvm_dirty_log log;
5454 
5455 		if (copy_from_user(&compat_log, (void __user *)arg,
5456 				   sizeof(compat_log)))
5457 			return -EFAULT;
5458 		log.slot	 = compat_log.slot;
5459 		log.padding1	 = compat_log.padding1;
5460 		log.padding2	 = compat_log.padding2;
5461 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5462 
5463 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5464 		break;
5465 	}
5466 	default:
5467 		r = kvm_vm_ioctl(filp, ioctl, arg);
5468 	}
5469 	return r;
5470 }
5471 #endif
5472 
5473 static struct file_operations kvm_vm_fops = {
5474 	.release        = kvm_vm_release,
5475 	.unlocked_ioctl = kvm_vm_ioctl,
5476 	.llseek		= noop_llseek,
5477 	KVM_COMPAT(kvm_vm_compat_ioctl),
5478 };
5479 
5480 bool file_is_kvm(struct file *file)
5481 {
5482 	return file && file->f_op == &kvm_vm_fops;
5483 }
5484 EXPORT_SYMBOL_FOR_KVM_INTERNAL(file_is_kvm);
5485 
5486 static int kvm_dev_ioctl_create_vm(unsigned long type)
5487 {
5488 	char fdname[ITOA_MAX_LEN + 1];
5489 	int r, fd;
5490 	struct kvm *kvm;
5491 	struct file *file;
5492 
5493 	fd = get_unused_fd_flags(O_CLOEXEC);
5494 	if (fd < 0)
5495 		return fd;
5496 
5497 	snprintf(fdname, sizeof(fdname), "%d", fd);
5498 
5499 	kvm = kvm_create_vm(type, fdname);
5500 	if (IS_ERR(kvm)) {
5501 		r = PTR_ERR(kvm);
5502 		goto put_fd;
5503 	}
5504 
5505 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5506 	if (IS_ERR(file)) {
5507 		r = PTR_ERR(file);
5508 		goto put_kvm;
5509 	}
5510 
5511 	/*
5512 	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5513 	 * already set, with ->release() being kvm_vm_release().  In error
5514 	 * cases it will be called by the final fput(file) and will take
5515 	 * care of doing kvm_put_kvm(kvm).
5516 	 */
5517 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5518 
5519 	fd_install(fd, file);
5520 	return fd;
5521 
5522 put_kvm:
5523 	kvm_put_kvm(kvm);
5524 put_fd:
5525 	put_unused_fd(fd);
5526 	return r;
5527 }
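
/*
 * Example (editorial sketch): the userspace counterpart of the path above;
 * KVM_CREATE_VM's argument is the machine type, 0 on most architectures.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */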
5528 
5529 static long kvm_dev_ioctl(struct file *filp,
5530 			  unsigned int ioctl, unsigned long arg)
5531 {
5532 	int r = -EINVAL;
5533 
5534 	switch (ioctl) {
5535 	case KVM_GET_API_VERSION:
5536 		if (arg)
5537 			goto out;
5538 		r = KVM_API_VERSION;
5539 		break;
5540 	case KVM_CREATE_VM:
5541 		r = kvm_dev_ioctl_create_vm(arg);
5542 		break;
5543 	case KVM_CHECK_EXTENSION:
5544 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5545 		break;
5546 	case KVM_GET_VCPU_MMAP_SIZE:
5547 		if (arg)
5548 			goto out;
5549 		r = PAGE_SIZE;     /* struct kvm_run */
5550 #ifdef CONFIG_X86
5551 		r += PAGE_SIZE;    /* pio data page */
5552 #endif
5553 #ifdef CONFIG_KVM_MMIO
5554 		r += PAGE_SIZE;    /* coalesced mmio ring page */
5555 #endif
5556 		break;
5557 	default:
5558 		return kvm_arch_dev_ioctl(filp, ioctl, arg);
5559 	}
5560 out:
5561 	return r;
5562 }
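
/*
 * Example (editorial sketch): userspace is expected to validate the API
 * version before anything else; KVM_GET_API_VERSION has returned 12 since
 * the ABI was declared stable.  kvm_fd is an assumed /dev/kvm descriptor:
 *
 *	int version = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
 */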
5563 
5564 static struct file_operations kvm_chardev_ops = {
5565 	.unlocked_ioctl = kvm_dev_ioctl,
5566 	.llseek		= noop_llseek,
5567 	KVM_COMPAT(kvm_dev_ioctl),
5568 };
5569 
5570 static struct miscdevice kvm_dev = {
5571 	KVM_MINOR,
5572 	"kvm",
5573 	&kvm_chardev_ops,
5574 };
5575 
5576 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5577 bool enable_virt_at_load = true;
5578 module_param(enable_virt_at_load, bool, 0444);
5579 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
5580 
5581 __visible bool kvm_rebooting;
5582 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
5583 
5584 static DEFINE_PER_CPU(bool, virtualization_enabled);
5585 static DEFINE_MUTEX(kvm_usage_lock);
5586 static int kvm_usage_count;
5587 
5588 __weak void kvm_arch_enable_virtualization(void)
5589 {
5590 
5591 }
5592 
5593 __weak void kvm_arch_disable_virtualization(void)
5594 {
5595 
5596 }
5597 
5598 static int kvm_enable_virtualization_cpu(void)
5599 {
5600 	if (__this_cpu_read(virtualization_enabled))
5601 		return 0;
5602 
5603 	if (kvm_arch_enable_virtualization_cpu()) {
5604 		pr_info("kvm: enabling virtualization on CPU%d failed\n",
5605 			raw_smp_processor_id());
5606 		return -EIO;
5607 	}
5608 
5609 	__this_cpu_write(virtualization_enabled, true);
5610 	return 0;
5611 }
5612 
5613 static int kvm_online_cpu(unsigned int cpu)
5614 {
5615 	/*
5616 	 * Abort the CPU online process if hardware virtualization cannot
5617 	 * be enabled. Otherwise running VMs would encounter unrecoverable
5618 	 * errors when scheduled to this CPU.
5619 	 */
5620 	return kvm_enable_virtualization_cpu();
5621 }
5622 
5623 static void kvm_disable_virtualization_cpu(void *ign)
5624 {
5625 	if (!__this_cpu_read(virtualization_enabled))
5626 		return;
5627 
5628 	kvm_arch_disable_virtualization_cpu();
5629 
5630 	__this_cpu_write(virtualization_enabled, false);
5631 }
5632 
5633 static int kvm_offline_cpu(unsigned int cpu)
5634 {
5635 	kvm_disable_virtualization_cpu(NULL);
5636 	return 0;
5637 }
5638 
5639 static void kvm_shutdown(void *data)
5640 {
5641 	/*
5642 	 * Disable hardware virtualization and set kvm_rebooting to indicate
5643 	 * that KVM has asynchronously disabled hardware virtualization, i.e.
5644 	 * that relevant errors and exceptions aren't entirely unexpected.
5645 	 * Some flavors of hardware virtualization need to be disabled before
5646 	 * transferring control to firmware (to perform shutdown/reboot), e.g.
5647 	 * on x86, virtualization can block INIT interrupts, which are used by
5648 	 * firmware to pull APs back under firmware control.  Note, this path
5649 	 * is used for both shutdown and reboot scenarios, i.e. neither name is
5650 	 * 100% comprehensive.
5651 	 */
5652 	pr_info("kvm: exiting hardware virtualization\n");
5653 	kvm_rebooting = true;
5654 	on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
5655 }
5656 
5657 static int kvm_suspend(void *data)
5658 {
5659 	/*
5660 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5661 	 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
5662 	 * count is stable.  Assert that kvm_usage_lock is not held to ensure
5663 	 * the system isn't suspended while KVM is enabling hardware.  Hardware
5664 	 * enabling can be preempted, but the task cannot be frozen until it has
5665 	 * dropped all locks (userspace tasks are frozen via a fake signal).
5666 	 */
5667 	lockdep_assert_not_held(&kvm_usage_lock);
5668 	lockdep_assert_irqs_disabled();
5669 
5670 	kvm_disable_virtualization_cpu(NULL);
5671 	return 0;
5672 }
5673 
5674 static void kvm_resume(void *data)
5675 {
5676 	lockdep_assert_not_held(&kvm_usage_lock);
5677 	lockdep_assert_irqs_disabled();
5678 
5679 	WARN_ON_ONCE(kvm_enable_virtualization_cpu());
5680 }
5681 
5682 static const struct syscore_ops kvm_syscore_ops = {
5683 	.suspend = kvm_suspend,
5684 	.resume = kvm_resume,
5685 	.shutdown = kvm_shutdown,
5686 };
5687 
5688 static struct syscore kvm_syscore = {
5689 	.ops = &kvm_syscore_ops,
5690 };
5691 
5692 int kvm_enable_virtualization(void)
5693 {
5694 	int r;
5695 
5696 	guard(mutex)(&kvm_usage_lock);
5697 
5698 	if (kvm_usage_count++)
5699 		return 0;
5700 
5701 	kvm_arch_enable_virtualization();
5702 
5703 	r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
5704 			      kvm_online_cpu, kvm_offline_cpu);
5705 	if (r)
5706 		goto err_cpuhp;
5707 
5708 	register_syscore(&kvm_syscore);
5709 
5710 	/*
5711 	 * Undo virtualization enabling and bail if the system is going down.
5712 	 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5713 	 * possible for an in-flight operation to enable virtualization after
5714 	 * syscore_shutdown() is called, i.e. without kvm_shutdown() being
5715 	 * invoked.  Note, this relies on system_state being set _before_
5716 	 * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
5717 	 * or this CPU observes the impending shutdown.  Which is why KVM uses
5718 	 * a syscore ops hook instead of registering a dedicated reboot
5719 	 * notifier (the latter runs before system_state is updated).
5720 	 */
5721 	if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5722 	    system_state == SYSTEM_RESTART) {
5723 		r = -EBUSY;
5724 		goto err_rebooting;
5725 	}
5726 
5727 	return 0;
5728 
5729 err_rebooting:
5730 	unregister_syscore(&kvm_syscore);
5731 	cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5732 err_cpuhp:
5733 	kvm_arch_disable_virtualization();
5734 	--kvm_usage_count;
5735 	return r;
5736 }
5737 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_virtualization);
5738 
5739 void kvm_disable_virtualization(void)
5740 {
5741 	guard(mutex)(&kvm_usage_lock);
5742 
5743 	if (--kvm_usage_count)
5744 		return;
5745 
5746 	unregister_syscore(&kvm_syscore);
5747 	cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5748 	kvm_arch_disable_virtualization();
5749 }
5750 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_disable_virtualization);
5751 
5752 static int kvm_init_virtualization(void)
5753 {
5754 	if (enable_virt_at_load)
5755 		return kvm_enable_virtualization();
5756 
5757 	return 0;
5758 }
5759 
5760 static void kvm_uninit_virtualization(void)
5761 {
5762 	if (enable_virt_at_load)
5763 		kvm_disable_virtualization();
5764 }
5765 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5766 static int kvm_init_virtualization(void)
5767 {
5768 	return 0;
5769 }
5770 
5771 static void kvm_uninit_virtualization(void)
5772 {
5773 
5774 }
5775 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5776 
5777 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5778 {
5779 	if (dev->ops->destructor)
5780 		dev->ops->destructor(dev);
5781 }
5782 
5783 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5784 {
5785 	int i;
5786 
5787 	for (i = 0; i < bus->dev_count; i++) {
5788 		struct kvm_io_device *pos = bus->range[i].dev;
5789 
5790 		kvm_iodevice_destructor(pos);
5791 	}
5792 	kfree(bus);
5793 }
5794 
5795 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5796 				 const struct kvm_io_range *r2)
5797 {
5798 	gpa_t addr1 = r1->addr;
5799 	gpa_t addr2 = r2->addr;
5800 
5801 	if (addr1 < addr2)
5802 		return -1;
5803 
5804 	/* If r2->len == 0, match the exact address.  If r2->len != 0,
5805 	 * accept any overlapping write.  Any order is acceptable for
5806 	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5807 	 * we process all of them.
5808 	 */
5809 	if (r2->len) {
5810 		addr1 += r1->len;
5811 		addr2 += r2->len;
5812 	}
5813 
5814 	if (addr1 > addr2)
5815 		return 1;
5816 
5817 	return 0;
5818 }
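
/*
 * Worked example (editorial note): in the lookup paths below
 * (kvm_io_bus_get_first_dev and the read/write helpers), r1 is the access
 * being performed and r2 a registered range.  An access of
 * {.addr = 0x102, .len = 2} against a device range of {.addr = 0x100,
 * .len = 4} passes the first check (0x102 >= 0x100) and then compares the
 * end addresses, 0x104 vs 0x104, so the two compare equal and the device
 * matches.  A zero-length registered range (an "any length" ioeventfd)
 * instead matches only an access starting at exactly that address.
 */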
5819 
5820 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5821 {
5822 	return kvm_io_bus_cmp(p1, p2);
5823 }
5824 
5825 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5826 			     gpa_t addr, int len)
5827 {
5828 	struct kvm_io_range *range, key;
5829 	int off;
5830 
5831 	key = (struct kvm_io_range) {
5832 		.addr = addr,
5833 		.len = len,
5834 	};
5835 
5836 	range = bsearch(&key, bus->range, bus->dev_count,
5837 			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5838 	if (range == NULL)
5839 		return -ENOENT;
5840 
5841 	off = range - bus->range;
5842 
5843 	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5844 		off--;
5845 
5846 	return off;
5847 }
5848 
5849 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5850 			      struct kvm_io_range *range, const void *val)
5851 {
5852 	int idx;
5853 
5854 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5855 	if (idx < 0)
5856 		return -EOPNOTSUPP;
5857 
5858 	while (idx < bus->dev_count &&
5859 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5860 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5861 					range->len, val))
5862 			return idx;
5863 		idx++;
5864 	}
5865 
5866 	return -EOPNOTSUPP;
5867 }
5868 
5869 static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
5870 {
5871 	/*
5872 	 * Ensure that any updates to kvm_buses[] observed by the previous vCPU
5873 	 * machine instruction are also visible to the vCPU machine instruction
5874 	 * that triggered this call.
5875 	 */
5876 	smp_mb__after_srcu_read_lock();
5877 
5878 	return srcu_dereference(kvm->buses[idx], &kvm->srcu);
5879 }
5880 
5881 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5882 		     int len, const void *val)
5883 {
5884 	struct kvm_io_bus *bus;
5885 	struct kvm_io_range range;
5886 	int r;
5887 
5888 	range = (struct kvm_io_range) {
5889 		.addr = addr,
5890 		.len = len,
5891 	};
5892 
5893 	bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5894 	if (!bus)
5895 		return -ENOMEM;
5896 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
5897 	return r < 0 ? r : 0;
5898 }
5899 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_write);
5900 
5901 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5902 			    gpa_t addr, int len, const void *val, long cookie)
5903 {
5904 	struct kvm_io_bus *bus;
5905 	struct kvm_io_range range;
5906 
5907 	range = (struct kvm_io_range) {
5908 		.addr = addr,
5909 		.len = len,
5910 	};
5911 
5912 	bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5913 	if (!bus)
5914 		return -ENOMEM;
5915 
5916 	/* First try the device referenced by cookie. */
5917 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
5918 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5919 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5920 					val))
5921 			return cookie;
5922 
5923 	/*
5924 	 * cookie contained garbage; fall back to search and return the
5925 	 * correct cookie value.
5926 	 */
5927 	return __kvm_io_bus_write(vcpu, bus, &range, val);
5928 }
5929 
5930 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5931 			     struct kvm_io_range *range, void *val)
5932 {
5933 	int idx;
5934 
5935 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5936 	if (idx < 0)
5937 		return -EOPNOTSUPP;
5938 
5939 	while (idx < bus->dev_count &&
5940 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5941 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5942 				       range->len, val))
5943 			return idx;
5944 		idx++;
5945 	}
5946 
5947 	return -EOPNOTSUPP;
5948 }
5949 
5950 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5951 		    int len, void *val)
5952 {
5953 	struct kvm_io_bus *bus;
5954 	struct kvm_io_range range;
5955 	int r;
5956 
5957 	range = (struct kvm_io_range) {
5958 		.addr = addr,
5959 		.len = len,
5960 	};
5961 
5962 	bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5963 	if (!bus)
5964 		return -ENOMEM;
5965 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
5966 	return r < 0 ? r : 0;
5967 }
5968 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_read);
5969 
5970 static void __free_bus(struct rcu_head *rcu)
5971 {
5972 	struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu);
5973 
5974 	kfree(bus);
5975 }
5976 
5977 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5978 			    int len, struct kvm_io_device *dev)
5979 {
5980 	int i;
5981 	struct kvm_io_bus *new_bus, *bus;
5982 	struct kvm_io_range range;
5983 
5984 	lockdep_assert_held(&kvm->slots_lock);
5985 
5986 	bus = kvm_get_bus(kvm, bus_idx);
5987 	if (!bus)
5988 		return -ENOMEM;
5989 
5990 	/* exclude ioeventfd which is limited by maximum fd */
5991 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5992 		return -ENOSPC;
5993 
5994 	new_bus = kmalloc_flex(*bus, range, bus->dev_count + 1,
5995 			       GFP_KERNEL_ACCOUNT);
5996 	if (!new_bus)
5997 		return -ENOMEM;
5998 
5999 	range = (struct kvm_io_range) {
6000 		.addr = addr,
6001 		.len = len,
6002 		.dev = dev,
6003 	};
6004 
6005 	for (i = 0; i < bus->dev_count; i++)
6006 		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
6007 			break;
6008 
6009 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
6010 	new_bus->dev_count++;
6011 	new_bus->range[i] = range;
6012 	memcpy(new_bus->range + i + 1, bus->range + i,
6013 		(bus->dev_count - i) * sizeof(struct kvm_io_range));
6014 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6015 	call_srcu(&kvm->srcu, &bus->rcu, __free_bus);
6016 
6017 	return 0;
6018 }
6019 
6020 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6021 			      struct kvm_io_device *dev)
6022 {
6023 	int i;
6024 	struct kvm_io_bus *new_bus, *bus;
6025 
6026 	lockdep_assert_held(&kvm->slots_lock);
6027 
6028 	bus = kvm_get_bus(kvm, bus_idx);
6029 	if (!bus)
6030 		return 0;
6031 
6032 	for (i = 0; i < bus->dev_count; i++) {
6033 		if (bus->range[i].dev == dev) {
6034 			break;
6035 		}
6036 	}
6037 
6038 	if (i == bus->dev_count)
6039 		return 0;
6040 
6041 	new_bus = kmalloc_flex(*bus, range, bus->dev_count - 1,
6042 			       GFP_KERNEL_ACCOUNT);
6043 	if (new_bus) {
6044 		memcpy(new_bus, bus, struct_size(bus, range, i));
6045 		new_bus->dev_count--;
6046 		memcpy(new_bus->range + i, bus->range + i + 1,
6047 				flex_array_size(new_bus, range, new_bus->dev_count - i));
6048 	}
6049 
6050 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6051 	synchronize_srcu_expedited(&kvm->srcu);
6052 
6053 	/*
6054 	 * If NULL bus is installed, destroy the old bus, including all the
6055 	 * attached devices. Otherwise, destroy the caller's device only.
6056 	 */
6057 	if (!new_bus) {
6058 		pr_err("kvm: failed to shrink bus, removing it completely\n");
6059 		kvm_io_bus_destroy(bus);
6060 		return -ENOMEM;
6061 	}
6062 
6063 	kvm_iodevice_destructor(dev);
6064 	kfree(bus);
6065 	return 0;
6066 }
6067 
6068 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6069 					 gpa_t addr)
6070 {
6071 	struct kvm_io_bus *bus;
6072 	int dev_idx, srcu_idx;
6073 	struct kvm_io_device *iodev = NULL;
6074 
6075 	srcu_idx = srcu_read_lock(&kvm->srcu);
6076 
6077 	bus = kvm_get_bus_srcu(kvm, bus_idx);
6078 	if (!bus)
6079 		goto out_unlock;
6080 
6081 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6082 	if (dev_idx < 0)
6083 		goto out_unlock;
6084 
6085 	iodev = bus->range[dev_idx].dev;
6086 
6087 out_unlock:
6088 	srcu_read_unlock(&kvm->srcu, srcu_idx);
6089 
6090 	return iodev;
6091 }
6092 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_get_dev);
6093 
6094 static int kvm_debugfs_open(struct inode *inode, struct file *file,
6095 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
6096 			   const char *fmt)
6097 {
6098 	int ret;
6099 	struct kvm_stat_data *stat_data = inode->i_private;
6100 
6101 	/*
6102 	 * The debugfs files are a reference to the kvm struct which
6103 	 * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
6104 	 * avoids the race between open and the removal of the debugfs directory.
6105 	 */
6106 	if (!kvm_get_kvm_safe(stat_data->kvm))
6107 		return -ENOENT;
6108 
6109 	ret = simple_attr_open(inode, file, get,
6110 			       kvm_stats_debugfs_mode(stat_data->desc) & 0222
6111 			       ? set : NULL, fmt);
6112 	if (ret)
6113 		kvm_put_kvm(stat_data->kvm);
6114 
6115 	return ret;
6116 }
6117 
6118 static int kvm_debugfs_release(struct inode *inode, struct file *file)
6119 {
6120 	struct kvm_stat_data *stat_data = inode->i_private;
6121 
6122 	simple_attr_release(inode, file);
6123 	kvm_put_kvm(stat_data->kvm);
6124 
6125 	return 0;
6126 }
6127 
6128 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6129 {
6130 	*val = *(u64 *)((void *)(&kvm->stat) + offset);
6131 
6132 	return 0;
6133 }
6134 
6135 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6136 {
6137 	*(u64 *)((void *)(&kvm->stat) + offset) = 0;
6138 
6139 	return 0;
6140 }
6141 
6142 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6143 {
6144 	unsigned long i;
6145 	struct kvm_vcpu *vcpu;
6146 
6147 	*val = 0;
6148 
6149 	kvm_for_each_vcpu(i, vcpu, kvm)
6150 		*val += *(u64 *)((void *)(&vcpu->stat) + offset);
6151 
6152 	return 0;
6153 }
6154 
6155 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6156 {
6157 	unsigned long i;
6158 	struct kvm_vcpu *vcpu;
6159 
6160 	kvm_for_each_vcpu(i, vcpu, kvm)
6161 		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6162 
6163 	return 0;
6164 }
6165 
6166 static int kvm_stat_data_get(void *data, u64 *val)
6167 {
6168 	int r = -EFAULT;
6169 	struct kvm_stat_data *stat_data = data;
6170 
6171 	switch (stat_data->kind) {
6172 	case KVM_STAT_VM:
6173 		r = kvm_get_stat_per_vm(stat_data->kvm,
6174 					stat_data->desc->desc.offset, val);
6175 		break;
6176 	case KVM_STAT_VCPU:
6177 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
6178 					  stat_data->desc->desc.offset, val);
6179 		break;
6180 	}
6181 
6182 	return r;
6183 }
6184 
6185 static int kvm_stat_data_clear(void *data, u64 val)
6186 {
6187 	int r = -EFAULT;
6188 	struct kvm_stat_data *stat_data = data;
6189 
6190 	if (val)
6191 		return -EINVAL;
6192 
6193 	switch (stat_data->kind) {
6194 	case KVM_STAT_VM:
6195 		r = kvm_clear_stat_per_vm(stat_data->kvm,
6196 					  stat_data->desc->desc.offset);
6197 		break;
6198 	case KVM_STAT_VCPU:
6199 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6200 					    stat_data->desc->desc.offset);
6201 		break;
6202 	}
6203 
6204 	return r;
6205 }
6206 
6207 static int kvm_stat_data_open(struct inode *inode, struct file *file)
6208 {
6209 	__simple_attr_check_format("%llu\n", 0ull);
6210 	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6211 				kvm_stat_data_clear, "%llu\n");
6212 }
6213 
6214 static const struct file_operations stat_fops_per_vm = {
6215 	.owner = THIS_MODULE,
6216 	.open = kvm_stat_data_open,
6217 	.release = kvm_debugfs_release,
6218 	.read = simple_attr_read,
6219 	.write = simple_attr_write,
6220 };
6221 
6222 static int vm_stat_get(void *_offset, u64 *val)
6223 {
6224 	unsigned offset = (long)_offset;
6225 	struct kvm *kvm;
6226 	u64 tmp_val;
6227 
6228 	*val = 0;
6229 	mutex_lock(&kvm_lock);
6230 	list_for_each_entry(kvm, &vm_list, vm_list) {
6231 		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6232 		*val += tmp_val;
6233 	}
6234 	mutex_unlock(&kvm_lock);
6235 	return 0;
6236 }
6237 
6238 static int vm_stat_clear(void *_offset, u64 val)
6239 {
6240 	unsigned offset = (long)_offset;
6241 	struct kvm *kvm;
6242 
6243 	if (val)
6244 		return -EINVAL;
6245 
6246 	mutex_lock(&kvm_lock);
6247 	list_for_each_entry(kvm, &vm_list, vm_list) {
6248 		kvm_clear_stat_per_vm(kvm, offset);
6249 	}
6250 	mutex_unlock(&kvm_lock);
6251 
6252 	return 0;
6253 }
6254 
6255 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6256 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6257 
6258 static int vcpu_stat_get(void *_offset, u64 *val)
6259 {
6260 	unsigned offset = (long)_offset;
6261 	struct kvm *kvm;
6262 	u64 tmp_val;
6263 
6264 	*val = 0;
6265 	mutex_lock(&kvm_lock);
6266 	list_for_each_entry(kvm, &vm_list, vm_list) {
6267 		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6268 		*val += tmp_val;
6269 	}
6270 	mutex_unlock(&kvm_lock);
6271 	return 0;
6272 }
6273 
6274 static int vcpu_stat_clear(void *_offset, u64 val)
6275 {
6276 	unsigned offset = (long)_offset;
6277 	struct kvm *kvm;
6278 
6279 	if (val)
6280 		return -EINVAL;
6281 
6282 	mutex_lock(&kvm_lock);
6283 	list_for_each_entry(kvm, &vm_list, vm_list) {
6284 		kvm_clear_stat_per_vcpu(kvm, offset);
6285 	}
6286 	mutex_unlock(&kvm_lock);
6287 
6288 	return 0;
6289 }
6290 
6291 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6292 			"%llu\n");
6293 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6294 
6295 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6296 {
6297 	struct kobj_uevent_env *env;
6298 	unsigned long long created, active;
6299 
6300 	if (!kvm_dev.this_device || !kvm)
6301 		return;
6302 
6303 	mutex_lock(&kvm_lock);
6304 	if (type == KVM_EVENT_CREATE_VM) {
6305 		kvm_createvm_count++;
6306 		kvm_active_vms++;
6307 	} else if (type == KVM_EVENT_DESTROY_VM) {
6308 		kvm_active_vms--;
6309 	}
6310 	created = kvm_createvm_count;
6311 	active = kvm_active_vms;
6312 	mutex_unlock(&kvm_lock);
6313 
6314 	env = kzalloc_obj(*env);
6315 	if (!env)
6316 		return;
6317 
6318 	add_uevent_var(env, "CREATED=%llu", created);
6319 	add_uevent_var(env, "COUNT=%llu", active);
6320 
6321 	if (type == KVM_EVENT_CREATE_VM) {
6322 		add_uevent_var(env, "EVENT=create");
6323 		kvm->userspace_pid = task_pid_nr(current);
6324 	} else if (type == KVM_EVENT_DESTROY_VM) {
6325 		add_uevent_var(env, "EVENT=destroy");
6326 	}
6327 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6328 
6329 	if (!IS_ERR(kvm->debugfs_dentry)) {
6330 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
6331 
6332 		if (p) {
6333 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6334 			if (!IS_ERR(tmp))
6335 				add_uevent_var(env, "STATS_PATH=%s", tmp);
6336 			kfree(p);
6337 		}
6338 	}
6339 	/* No need to check for failure, since we add at most 5 keys. */
6340 	env->envp[env->envp_idx++] = NULL;
6341 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6342 	kfree(env);
6343 }
6344 
6345 static void kvm_init_debug(void)
6346 {
6347 	const struct file_operations *fops;
6348 	const struct _kvm_stats_desc *pdesc;
6349 	int i;
6350 
6351 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6352 
6353 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6354 		pdesc = &kvm_vm_stats_desc[i];
6355 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6356 			fops = &vm_stat_fops;
6357 		else
6358 			fops = &vm_stat_readonly_fops;
6359 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6360 				kvm_debugfs_dir,
6361 				(void *)(long)pdesc->desc.offset, fops);
6362 	}
6363 
6364 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6365 		pdesc = &kvm_vcpu_stats_desc[i];
6366 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6367 			fops = &vcpu_stat_fops;
6368 		else
6369 			fops = &vcpu_stat_readonly_fops;
6370 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6371 				kvm_debugfs_dir,
6372 				(void *)(long)pdesc->desc.offset, fops);
6373 	}
6374 }
6375 
6376 static inline
6377 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6378 {
6379 	return container_of(pn, struct kvm_vcpu, preempt_notifier);
6380 }
6381 
6382 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6383 {
6384 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6385 
6386 	WRITE_ONCE(vcpu->preempted, false);
6387 	WRITE_ONCE(vcpu->ready, false);
6388 
6389 	__this_cpu_write(kvm_running_vcpu, vcpu);
6390 	kvm_arch_vcpu_load(vcpu, cpu);
6391 
6392 	WRITE_ONCE(vcpu->scheduled_out, false);
6393 }
6394 
6395 static void kvm_sched_out(struct preempt_notifier *pn,
6396 			  struct task_struct *next)
6397 {
6398 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6399 
6400 	WRITE_ONCE(vcpu->scheduled_out, true);
6401 
6402 	if (task_is_runnable(current) && vcpu->wants_to_run) {
6403 		WRITE_ONCE(vcpu->preempted, true);
6404 		WRITE_ONCE(vcpu->ready, true);
6405 	}
6406 	kvm_arch_vcpu_put(vcpu);
6407 	__this_cpu_write(kvm_running_vcpu, NULL);
6408 }
6409 
6410 /**
6411  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6412  *
6413  * It is safe to disable preemption only around the per-CPU access and to
6414  * use the resolved vcpu pointer after re-enabling preemption: even if the
6415  * current thread migrates to another CPU, a later read of the per-CPU
6416  * variable yields the same value, because the preempt notifier handlers
6417  * update it on every context switch.
6418  */
6419 struct kvm_vcpu *kvm_get_running_vcpu(void)
6420 {
6421 	struct kvm_vcpu *vcpu;
6422 
6423 	preempt_disable();
6424 	vcpu = __this_cpu_read(kvm_running_vcpu);
6425 	preempt_enable();
6426 
6427 	return vcpu;
6428 }
6429 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_running_vcpu);
6430 
6431 /**
6432  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6433  */
6434 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6435 {
6436 	return &kvm_running_vcpu;
6437 }
6438 
6439 #ifdef CONFIG_GUEST_PERF_EVENTS
6440 static unsigned int kvm_guest_state(void)
6441 {
6442 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6443 	unsigned int state;
6444 
6445 	if (!kvm_arch_pmi_in_guest(vcpu))
6446 		return 0;
6447 
6448 	state = PERF_GUEST_ACTIVE;
6449 	if (!kvm_arch_vcpu_in_kernel(vcpu))
6450 		state |= PERF_GUEST_USER;
6451 
6452 	return state;
6453 }
6454 
6455 static unsigned long kvm_guest_get_ip(void)
6456 {
6457 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6458 
6459 	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6460 	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6461 		return 0;
6462 
6463 	return kvm_arch_vcpu_get_ip(vcpu);
6464 }
6465 
6466 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6467 	.state			= kvm_guest_state,
6468 	.get_ip			= kvm_guest_get_ip,
6469 	.handle_intel_pt_intr	= NULL,
6470 	.handle_mediated_pmi	= NULL,
6471 };
6472 
6473 void __kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void),
6474 				   void (*mediated_pmi_handler)(void))
6475 {
6476 	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6477 	kvm_guest_cbs.handle_mediated_pmi = mediated_pmi_handler;
6478 
6479 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
6480 }
6481 void kvm_unregister_perf_callbacks(void)
6482 {
6483 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6484 }
6485 #endif
6486 
6487 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6488 {
6489 	int r;
6490 	int cpu;
6491 
6492 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
6493 	if (!vcpu_align)
6494 		vcpu_align = __alignof__(struct kvm_vcpu);
6495 	kvm_vcpu_cache =
6496 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6497 					   SLAB_ACCOUNT,
6498 					   offsetof(struct kvm_vcpu, arch),
6499 					   offsetofend(struct kvm_vcpu, stats_id)
6500 					   - offsetof(struct kvm_vcpu, arch),
6501 					   NULL);
6502 	if (!kvm_vcpu_cache)
6503 		return -ENOMEM;
6504 
6505 	for_each_possible_cpu(cpu) {
6506 		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6507 					    GFP_KERNEL, cpu_to_node(cpu))) {
6508 			r = -ENOMEM;
6509 			goto err_cpu_kick_mask;
6510 		}
6511 	}
6512 
6513 	r = kvm_irqfd_init();
6514 	if (r)
6515 		goto err_irqfd;
6516 
6517 	r = kvm_async_pf_init();
6518 	if (r)
6519 		goto err_async_pf;
6520 
6521 	kvm_chardev_ops.owner = module;
6522 	kvm_vm_fops.owner = module;
6523 	kvm_vcpu_fops.owner = module;
6524 	kvm_device_fops.owner = module;
6525 
6526 	kvm_preempt_ops.sched_in = kvm_sched_in;
6527 	kvm_preempt_ops.sched_out = kvm_sched_out;
6528 
6529 	kvm_init_debug();
6530 
6531 	r = kvm_vfio_ops_init();
6532 	if (WARN_ON_ONCE(r))
6533 		goto err_vfio;
6534 
6535 	r = kvm_gmem_init(module);
6536 	if (r)
6537 		goto err_gmem;
6538 
6539 	r = kvm_init_virtualization();
6540 	if (r)
6541 		goto err_virt;
6542 
6543 	/*
6544 	 * Registration _must_ be the very last thing done, as this exposes
6545 	 * /dev/kvm to userspace, i.e. all infrastructure must be setup!
6546 	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
6547 	r = misc_register(&kvm_dev);
6548 	if (r) {
6549 		pr_err("kvm: misc device register failed\n");
6550 		goto err_register;
6551 	}
6552 
6553 	return 0;
6554 
6555 err_register:
6556 	kvm_uninit_virtualization();
6557 err_virt:
6558 	kvm_gmem_exit();
6559 err_gmem:
6560 	kvm_vfio_ops_exit();
6561 err_vfio:
6562 	kvm_async_pf_deinit();
6563 err_async_pf:
6564 	kvm_irqfd_exit();
6565 err_irqfd:
6566 err_cpu_kick_mask:
6567 	for_each_possible_cpu(cpu)
6568 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6569 	kmem_cache_destroy(kvm_vcpu_cache);
6570 	return r;
6571 }
6572 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init);
6573 
6574 void kvm_exit(void)
6575 {
6576 	int cpu;
6577 
6578 	/*
6579 	 * Note, unregistering /dev/kvm doesn't strictly need to come first, as
6580 	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
6581 	 * to KVM while the module is being stopped.
6582 	 */
6583 	misc_deregister(&kvm_dev);
6584 
6585 	kvm_uninit_virtualization();
6586 
6587 	debugfs_remove_recursive(kvm_debugfs_dir);
6588 	for_each_possible_cpu(cpu)
6589 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6590 	kmem_cache_destroy(kvm_vcpu_cache);
6591 	kvm_gmem_exit();
6592 	kvm_vfio_ops_exit();
6593 	kvm_async_pf_deinit();
6594 	kvm_irqfd_exit();
6595 }
6596 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_exit);
6597