1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine (KVM) Hypervisor
4  *
5  * Copyright (C) 2006 Qumranet, Inc.
6  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
7  *
8  * Authors:
9  *   Avi Kivity   <avi@qumranet.com>
10  *   Yaniv Kamay  <yaniv@qumranet.com>
11  */
12 
13 #include <kvm/iodev.h>
14 
15 #include <linux/kvm_host.h>
16 #include <linux/kvm.h>
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/percpu.h>
20 #include <linux/mm.h>
21 #include <linux/miscdevice.h>
22 #include <linux/vmalloc.h>
23 #include <linux/reboot.h>
24 #include <linux/debugfs.h>
25 #include <linux/highmem.h>
26 #include <linux/file.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/cpu.h>
29 #include <linux/sched/signal.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/stat.h>
32 #include <linux/cpumask.h>
33 #include <linux/smp.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/profile.h>
36 #include <linux/kvm_para.h>
37 #include <linux/pagemap.h>
38 #include <linux/mman.h>
39 #include <linux/swap.h>
40 #include <linux/bitops.h>
41 #include <linux/spinlock.h>
42 #include <linux/compat.h>
43 #include <linux/srcu.h>
44 #include <linux/hugetlb.h>
45 #include <linux/slab.h>
46 #include <linux/sort.h>
47 #include <linux/bsearch.h>
48 #include <linux/io.h>
49 #include <linux/lockdep.h>
50 #include <linux/kthread.h>
51 #include <linux/suspend.h>
52 
53 #include <asm/processor.h>
54 #include <asm/ioctl.h>
55 #include <linux/uaccess.h>
56 
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59 #include "kvm_mm.h"
60 #include "vfio.h"
61 
62 #include <trace/events/ipi.h>
63 
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/kvm.h>
66 
67 #include <linux/kvm_dirty_ring.h>
68 
69 
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72 
73 MODULE_AUTHOR("Qumranet");
74 MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
75 MODULE_LICENSE("GPL");
76 
77 /* Architectures should define their poll value according to the halt latency */
78 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
79 module_param(halt_poll_ns, uint, 0644);
80 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
81 
82 /* Default doubles per-vcpu halt_poll_ns. */
83 unsigned int halt_poll_ns_grow = 2;
84 module_param(halt_poll_ns_grow, uint, 0644);
85 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
86 
87 /* The start value to grow halt_poll_ns from */
88 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
89 module_param(halt_poll_ns_grow_start, uint, 0644);
90 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
91 
92 /* Default halves per-vcpu halt_poll_ns. */
93 unsigned int halt_poll_ns_shrink = 2;
94 module_param(halt_poll_ns_shrink, uint, 0644);
95 EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
96 
97 /*
98  * Allow direct access (from KVM or the CPU) without MMU notifier protection
99  * to unpinned pages.
100  */
101 static bool allow_unsafe_mappings;
102 module_param(allow_unsafe_mappings, bool, 0444);
103 
104 /*
105  * Ordering of locks:
106  *
107  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
108  */
109 
110 DEFINE_MUTEX(kvm_lock);
111 LIST_HEAD(vm_list);
112 
113 static struct kmem_cache *kvm_vcpu_cache;
114 
115 static __read_mostly struct preempt_ops kvm_preempt_ops;
116 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
117 
118 static struct dentry *kvm_debugfs_dir;
119 
120 static const struct file_operations stat_fops_per_vm;
121 
122 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
123 			   unsigned long arg);
124 #ifdef CONFIG_KVM_COMPAT
125 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
126 				  unsigned long arg);
127 #define KVM_COMPAT(c)	.compat_ioctl	= (c)
128 #else
129 /*
130  * For architectures that don't implement a compat infrastructure,
131  * adopt a double line of defense:
132  * - Prevent a compat task from opening /dev/kvm
133  * - If the open has been done by a 64bit task, and the KVM fd
134  *   passed to a compat task, let the ioctls fail.
135  */
136 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
137 				unsigned long arg) { return -EINVAL; }
138 
139 static int kvm_no_compat_open(struct inode *inode, struct file *file)
140 {
141 	return is_compat_task() ? -ENODEV : 0;
142 }
143 #define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
144 			.open		= kvm_no_compat_open
145 #endif
146 
147 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
148 
149 #define KVM_EVENT_CREATE_VM 0
150 #define KVM_EVENT_DESTROY_VM 1
151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
152 static unsigned long long kvm_createvm_count;
153 static unsigned long long kvm_active_vms;
154 
155 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
156 
157 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
158 {
159 }
160 
161 /*
162  * Switches to the specified vcpu, until a matching vcpu_put().
163  */
164 void vcpu_load(struct kvm_vcpu *vcpu)
165 {
166 	int cpu = get_cpu();
167 
168 	__this_cpu_write(kvm_running_vcpu, vcpu);
169 	preempt_notifier_register(&vcpu->preempt_notifier);
170 	kvm_arch_vcpu_load(vcpu, cpu);
171 	put_cpu();
172 }
173 EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);
174 
175 void vcpu_put(struct kvm_vcpu *vcpu)
176 {
177 	preempt_disable();
178 	kvm_arch_vcpu_put(vcpu);
179 	preempt_notifier_unregister(&vcpu->preempt_notifier);
180 	__this_cpu_write(kvm_running_vcpu, NULL);
181 	preempt_enable();
182 }
183 EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_put);
184 
185 /* TODO: merge with kvm_arch_vcpu_should_kick */
186 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
187 {
188 	int mode = kvm_vcpu_exiting_guest_mode(vcpu);
189 
190 	/*
191 	 * We need to wait for the VCPU to reenable interrupts and get out of
192 	 * READING_SHADOW_PAGE_TABLES mode.
193 	 */
194 	if (req & KVM_REQUEST_WAIT)
195 		return mode != OUTSIDE_GUEST_MODE;
196 
197 	/*
198 	 * Need to kick a running VCPU, but otherwise there is nothing to do.
199 	 */
200 	return mode == IN_GUEST_MODE;
201 }
202 
203 static void ack_kick(void *_completed)
204 {
205 }
206 
207 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
208 {
209 	if (cpumask_empty(cpus))
210 		return false;
211 
212 	smp_call_function_many(cpus, ack_kick, NULL, wait);
213 	return true;
214 }
215 
216 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
217 				  struct cpumask *tmp, int current_cpu)
218 {
219 	int cpu;
220 
221 	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
222 		__kvm_make_request(req, vcpu);
223 
224 	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
225 		return;
226 
227 	/*
228 	 * Note, the vCPU could get migrated to a different pCPU at any point
229 	 * after kvm_request_needs_ipi(), which could result in sending an IPI
230 	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
231 	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
232 	 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
233 	 * after this point is also OK, as the requirement is only that KVM wait
234 	 * for vCPUs that were reading SPTEs _before_ any changes were
235 	 * finalized. See kvm_vcpu_kick() for more details on handling requests.
236 	 */
237 	if (kvm_request_needs_ipi(vcpu, req)) {
238 		cpu = READ_ONCE(vcpu->cpu);
239 		if (cpu != -1 && cpu != current_cpu)
240 			__cpumask_set_cpu(cpu, tmp);
241 	}
242 }
243 
244 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
245 				 unsigned long *vcpu_bitmap)
246 {
247 	struct kvm_vcpu *vcpu;
248 	struct cpumask *cpus;
249 	int i, me;
250 	bool called;
251 
252 	me = get_cpu();
253 
254 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
255 	cpumask_clear(cpus);
256 
257 	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
258 		vcpu = kvm_get_vcpu(kvm, i);
259 		if (!vcpu)
260 			continue;
261 		kvm_make_vcpu_request(vcpu, req, cpus, me);
262 	}
263 
264 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
265 	put_cpu();
266 
267 	return called;
268 }
269 
270 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
271 {
272 	struct kvm_vcpu *vcpu;
273 	struct cpumask *cpus;
274 	unsigned long i;
275 	bool called;
276 	int me;
277 
278 	me = get_cpu();
279 
280 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
281 	cpumask_clear(cpus);
282 
283 	kvm_for_each_vcpu(i, vcpu, kvm)
284 		kvm_make_vcpu_request(vcpu, req, cpus, me);
285 
286 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
287 	put_cpu();
288 
289 	return called;
290 }
291 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_make_all_cpus_request);
292 
293 void kvm_flush_remote_tlbs(struct kvm *kvm)
294 {
295 	++kvm->stat.generic.remote_tlb_flush_requests;
296 
297 	/*
298 	 * We want to publish modifications to the page tables before reading
299 	 * mode. Pairs with a memory barrier in arch-specific code.
300 	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
301 	 * and smp_mb in walk_shadow_page_lockless_begin/end.
302 	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
303 	 *
304 	 * There is already an smp_mb__after_atomic() before
305 	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
306 	 * barrier here.
307 	 */
308 	if (!kvm_arch_flush_remote_tlbs(kvm)
309 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
310 		++kvm->stat.generic.remote_tlb_flush;
311 }
312 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_remote_tlbs);
313 
314 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
315 {
316 	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
317 		return;
318 
319 	/*
320 	 * Fall back to flushing the entire TLB if the architecture range-based
321 	 * TLB invalidation is unsupported or can't be performed for whatever
322 	 * reason.
323 	 */
324 	kvm_flush_remote_tlbs(kvm);
325 }
326 
327 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
328 				   const struct kvm_memory_slot *memslot)
329 {
330 	/*
331 	 * All current use cases for flushing the TLBs for a specific memslot
332 	 * are related to dirty logging, and many do the TLB flush out of
333 	 * mmu_lock. The interaction between the various operations on memslot
334 	 * must be serialized by slots_lock to ensure the TLB flush from one
335 	 * operation is observed by any other operation on the same memslot.
336 	 */
337 	lockdep_assert_held(&kvm->slots_lock);
338 	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
339 }
340 
341 static void kvm_flush_shadow_all(struct kvm *kvm)
342 {
343 	kvm_arch_flush_shadow_all(kvm);
344 	kvm_arch_guest_memory_reclaimed(kvm);
345 }
346 
347 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
348 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
349 					       gfp_t gfp_flags)
350 {
351 	void *page;
352 
353 	gfp_flags |= mc->gfp_zero;
354 
355 	if (mc->kmem_cache)
356 		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
357 
358 	page = (void *)__get_free_page(gfp_flags);
359 	if (page && mc->init_value)
360 		memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
361 	return page;
362 }
363 
364 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
365 {
366 	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
367 	void *obj;
368 
369 	if (mc->nobjs >= min)
370 		return 0;
371 
372 	if (unlikely(!mc->objects)) {
373 		if (WARN_ON_ONCE(!capacity))
374 			return -EIO;
375 
376 		/*
377 		 * Custom init values can be used only for page allocations,
378 		 * and obviously conflict with __GFP_ZERO.
379 		 */
380 		if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
381 			return -EIO;
382 
383 		mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
384 		if (!mc->objects)
385 			return -ENOMEM;
386 
387 		mc->capacity = capacity;
388 	}
389 
390 	/* It is illegal to request a different capacity across topups. */
391 	if (WARN_ON_ONCE(mc->capacity != capacity))
392 		return -EIO;
393 
394 	while (mc->nobjs < mc->capacity) {
395 		obj = mmu_memory_cache_alloc_obj(mc, gfp);
396 		if (!obj)
397 			return mc->nobjs >= min ? 0 : -ENOMEM;
398 		mc->objects[mc->nobjs++] = obj;
399 	}
400 	return 0;
401 }
402 
403 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
404 {
405 	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
406 }
407 
408 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
409 {
410 	return mc->nobjs;
411 }
412 
413 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
414 {
415 	while (mc->nobjs) {
416 		if (mc->kmem_cache)
417 			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
418 		else
419 			free_page((unsigned long)mc->objects[--mc->nobjs]);
420 	}
421 
422 	kvfree(mc->objects);
423 
424 	mc->objects = NULL;
425 	mc->capacity = 0;
426 }
427 
428 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
429 {
430 	void *p;
431 
432 	if (WARN_ON(!mc->nobjs))
433 		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
434 	else
435 		p = mc->objects[--mc->nobjs];
436 	BUG_ON(!p);
437 	return p;
438 }
439 #endif
440 
441 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
442 {
443 	mutex_init(&vcpu->mutex);
444 	vcpu->cpu = -1;
445 	vcpu->kvm = kvm;
446 	vcpu->vcpu_id = id;
447 	vcpu->pid = NULL;
448 	rwlock_init(&vcpu->pid_lock);
449 #ifndef __KVM_HAVE_ARCH_WQP
450 	rcuwait_init(&vcpu->wait);
451 #endif
452 	kvm_async_pf_vcpu_init(vcpu);
453 
454 	kvm_vcpu_set_in_spin_loop(vcpu, false);
455 	kvm_vcpu_set_dy_eligible(vcpu, false);
456 	vcpu->preempted = false;
457 	vcpu->ready = false;
458 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
459 	vcpu->last_used_slot = NULL;
460 
461 	/* Fill the stats id string for the vcpu */
462 	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
463 		 task_pid_nr(current), id);
464 }
465 
466 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
467 {
468 	kvm_arch_vcpu_destroy(vcpu);
469 	kvm_dirty_ring_free(&vcpu->dirty_ring);
470 
471 	/*
472 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
473 	 * the vcpu->pid pointer, and at destruction time all file descriptors
474 	 * are already gone.
475 	 */
476 	put_pid(vcpu->pid);
477 
478 	free_page((unsigned long)vcpu->run);
479 	kmem_cache_free(kvm_vcpu_cache, vcpu);
480 }
481 
482 void kvm_destroy_vcpus(struct kvm *kvm)
483 {
484 	unsigned long i;
485 	struct kvm_vcpu *vcpu;
486 
487 	kvm_for_each_vcpu(i, vcpu, kvm) {
488 		kvm_vcpu_destroy(vcpu);
489 		xa_erase(&kvm->vcpu_array, i);
490 
491 		/*
492 		 * Assert that the vCPU isn't visible in any way, to ensure KVM
493 		 * doesn't trigger a use-after-free if destroying vCPUs results
494 		 * in VM-wide request, e.g. to flush remote TLBs when tearing
495 		 * down MMUs, or to mark the VM dead if a KVM_BUG_ON() fires.
496 		 */
497 		WARN_ON_ONCE(xa_load(&kvm->vcpu_array, i) || kvm_get_vcpu(kvm, i));
498 	}
499 
500 	atomic_set(&kvm->online_vcpus, 0);
501 }
502 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);
503 
504 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
505 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
506 {
507 	return container_of(mn, struct kvm, mmu_notifier);
508 }
509 
510 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
511 
512 typedef void (*on_lock_fn_t)(struct kvm *kvm);
513 
514 struct kvm_mmu_notifier_range {
515 	/*
516 	 * 64-bit addresses, as KVM notifiers can operate on host virtual
517 	 * addresses (unsigned long) and guest physical addresses (64-bit).
518 	 */
519 	u64 start;
520 	u64 end;
521 	union kvm_mmu_notifier_arg arg;
522 	gfn_handler_t handler;
523 	on_lock_fn_t on_lock;
524 	bool flush_on_ret;
525 	bool may_block;
526 	bool lockless;
527 };
528 
529 /*
530  * The inner-most helper returns a tuple containing the return value from the
531  * arch- and action-specific handler, plus a flag indicating whether or not at
532  * least one memslot was found, i.e. if the handler found guest memory.
533  *
534  * Note, most notifiers are averse to booleans, so even though KVM tracks the
535  * return from arch code as a bool, outer helpers will cast it to an int. :-(
536  */
537 typedef struct kvm_mmu_notifier_return {
538 	bool ret;
539 	bool found_memslot;
540 } kvm_mn_ret_t;
541 
542 /*
543  * Use a dedicated stub instead of NULL to indicate that there is no callback
544  * function/handler.  The compiler technically can't guarantee that a real
545  * function will have a non-zero address, and so it will generate code to
546  * check for !NULL, whereas comparing against a stub will be elided at compile
547  * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
548  */
549 static void kvm_null_fn(void)
550 {
551 
552 }
553 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
554 
555 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
556 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
557 	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
558 	     node;							     \
559 	     node = interval_tree_iter_next(node, start, last))	     \
560 
561 static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
562 							 const struct kvm_mmu_notifier_range *range)
563 {
564 	struct kvm_mmu_notifier_return r = {
565 		.ret = false,
566 		.found_memslot = false,
567 	};
568 	struct kvm_gfn_range gfn_range;
569 	struct kvm_memory_slot *slot;
570 	struct kvm_memslots *slots;
571 	int i, idx;
572 
573 	if (WARN_ON_ONCE(range->end <= range->start))
574 		return r;
575 
576 	/* A null handler is allowed if and only if on_lock() is provided. */
577 	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
578 			 IS_KVM_NULL_FN(range->handler)))
579 		return r;
580 
581 	/* on_lock will never be called for lockless walks */
582 	if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
583 		return r;
584 
585 	idx = srcu_read_lock(&kvm->srcu);
586 
587 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
588 		struct interval_tree_node *node;
589 
590 		slots = __kvm_memslots(kvm, i);
591 		kvm_for_each_memslot_in_hva_range(node, slots,
592 						  range->start, range->end - 1) {
593 			unsigned long hva_start, hva_end;
594 
595 			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
596 			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
597 			hva_end = min_t(unsigned long, range->end,
598 					slot->userspace_addr + (slot->npages << PAGE_SHIFT));
599 
600 			/*
601 			 * To optimize for the likely case where the address
602 			 * range is covered by zero or one memslots, don't
603 			 * bother making these conditional (to avoid writes on
604 			 * the second or later invocation of the handler).
605 			 */
606 			gfn_range.arg = range->arg;
607 			gfn_range.may_block = range->may_block;
608 			/*
609 			 * HVA-based notifications aren't relevant to private
610 			 * mappings as they don't have a userspace mapping.
611 			 */
612 			gfn_range.attr_filter = KVM_FILTER_SHARED;
613 
614 			/*
615 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
616 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
617 			 */
618 			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
619 			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
620 			gfn_range.slot = slot;
621 			gfn_range.lockless = range->lockless;
622 
623 			if (!r.found_memslot) {
624 				r.found_memslot = true;
625 				if (!range->lockless) {
626 					KVM_MMU_LOCK(kvm);
627 					if (!IS_KVM_NULL_FN(range->on_lock))
628 						range->on_lock(kvm);
629 
630 					if (IS_KVM_NULL_FN(range->handler))
631 						goto mmu_unlock;
632 				}
633 			}
634 			r.ret |= range->handler(kvm, &gfn_range);
635 		}
636 	}
637 
638 	if (range->flush_on_ret && r.ret)
639 		kvm_flush_remote_tlbs(kvm);
640 
641 mmu_unlock:
642 	if (r.found_memslot && !range->lockless)
643 		KVM_MMU_UNLOCK(kvm);
644 
645 	srcu_read_unlock(&kvm->srcu, idx);
646 
647 	return r;
648 }
649 
650 static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
651 						unsigned long start,
652 						unsigned long end,
653 						gfn_handler_t handler,
654 						bool flush_on_ret)
655 {
656 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
657 	const struct kvm_mmu_notifier_range range = {
658 		.start		= start,
659 		.end		= end,
660 		.handler	= handler,
661 		.on_lock	= (void *)kvm_null_fn,
662 		.flush_on_ret	= flush_on_ret,
663 		.may_block	= false,
664 		.lockless	= IS_ENABLED(CONFIG_KVM_MMU_LOCKLESS_AGING),
665 	};
666 
667 	return kvm_handle_hva_range(kvm, &range).ret;
668 }
669 
670 static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
671 						      unsigned long start,
672 						      unsigned long end,
673 						      gfn_handler_t handler)
674 {
675 	return kvm_age_hva_range(mn, start, end, handler, false);
676 }
677 
678 void kvm_mmu_invalidate_begin(struct kvm *kvm)
679 {
680 	lockdep_assert_held_write(&kvm->mmu_lock);
681 	/*
682 	 * The count increase must become visible at unlock time as no
683 	 * spte can be established without taking the mmu_lock and
684 	 * count is also read inside the mmu_lock critical section.
685 	 */
686 	kvm->mmu_invalidate_in_progress++;
687 
688 	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
689 		kvm->mmu_invalidate_range_start = INVALID_GPA;
690 		kvm->mmu_invalidate_range_end = INVALID_GPA;
691 	}
692 }
693 
694 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
695 {
696 	lockdep_assert_held_write(&kvm->mmu_lock);
697 
698 	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
699 
700 	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
701 		kvm->mmu_invalidate_range_start = start;
702 		kvm->mmu_invalidate_range_end = end;
703 	} else {
704 		/*
705 		 * Fully tracking multiple concurrent ranges has diminishing
706 		 * returns. Keep things simple and just find the minimal range
707 		 * which includes the current and new ranges. As there won't be
708 		 * enough information to subtract a range after its invalidate
709 		 * completes, any ranges invalidated concurrently will
710 		 * accumulate and persist until all outstanding invalidates
711 		 * complete.
712 		 */
713 		kvm->mmu_invalidate_range_start =
714 			min(kvm->mmu_invalidate_range_start, start);
715 		kvm->mmu_invalidate_range_end =
716 			max(kvm->mmu_invalidate_range_end, end);
717 	}
718 }
719 
720 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
721 {
722 	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
723 	return kvm_unmap_gfn_range(kvm, range);
724 }
725 
726 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
727 					const struct mmu_notifier_range *range)
728 {
729 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
730 	const struct kvm_mmu_notifier_range hva_range = {
731 		.start		= range->start,
732 		.end		= range->end,
733 		.handler	= kvm_mmu_unmap_gfn_range,
734 		.on_lock	= kvm_mmu_invalidate_begin,
735 		.flush_on_ret	= true,
736 		.may_block	= mmu_notifier_range_blockable(range),
737 	};
738 
739 	trace_kvm_unmap_hva_range(range->start, range->end);
740 
741 	/*
742 	 * Prevent memslot modification between range_start() and range_end()
743 	 * so that conditionally locking provides the same result in both
744 	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
745 	 * adjustments will be imbalanced.
746 	 *
747 	 * Pairs with the decrement in range_end().
748 	 */
749 	spin_lock(&kvm->mn_invalidate_lock);
750 	kvm->mn_active_invalidate_count++;
751 	spin_unlock(&kvm->mn_invalidate_lock);
752 
753 	/*
754 	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
755 	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
756 	 * each cache's lock.  There are relatively few caches in existence at
757 	 * any given time, and the caches themselves can check for hva overlap,
758 	 * i.e. don't need to rely on memslot overlap checks for performance.
759 	 * Because this runs without holding mmu_lock, the pfn caches must use
760 	 * mn_active_invalidate_count (see above) instead of
761 	 * mmu_invalidate_in_progress.
762 	 */
763 	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
764 
765 	/*
766 	 * If one or more memslots were found and thus zapped, notify arch code
767 	 * that guest memory has been reclaimed.  This needs to be done *after*
768 	 * dropping mmu_lock, as x86's reclaim path is slooooow.
769 	 */
770 	if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
771 		kvm_arch_guest_memory_reclaimed(kvm);
772 
773 	return 0;
774 }
775 
776 void kvm_mmu_invalidate_end(struct kvm *kvm)
777 {
778 	lockdep_assert_held_write(&kvm->mmu_lock);
779 
780 	/*
781 	 * This sequence increase notifies the KVM page fault path that
782 	 * the page about to be mapped in the SPTE could have been
783 	 * freed.
784 	 */
785 	kvm->mmu_invalidate_seq++;
786 	smp_wmb();
787 	/*
788 	 * The above sequence increase must be visible before the
789 	 * below count decrease, which is ensured by the smp_wmb above
790 	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
791 	 */
792 	kvm->mmu_invalidate_in_progress--;
793 	KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
794 
795 	/*
796 	 * Assert that at least one range was added between start() and end().
797 	 * Not adding a range isn't fatal, but it is a KVM bug.
798 	 */
799 	WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
800 }
801 
802 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
803 					const struct mmu_notifier_range *range)
804 {
805 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
806 	const struct kvm_mmu_notifier_range hva_range = {
807 		.start		= range->start,
808 		.end		= range->end,
809 		.handler	= (void *)kvm_null_fn,
810 		.on_lock	= kvm_mmu_invalidate_end,
811 		.flush_on_ret	= false,
812 		.may_block	= mmu_notifier_range_blockable(range),
813 	};
814 	bool wake;
815 
816 	kvm_handle_hva_range(kvm, &hva_range);
817 
818 	/* Pairs with the increment in range_start(). */
819 	spin_lock(&kvm->mn_invalidate_lock);
820 	if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
821 		--kvm->mn_active_invalidate_count;
822 	wake = !kvm->mn_active_invalidate_count;
823 	spin_unlock(&kvm->mn_invalidate_lock);
824 
825 	/*
826 	 * There can only be one waiter, since the wait happens under
827 	 * slots_lock.
828 	 */
829 	if (wake)
830 		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
831 }
832 
833 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
834 					      struct mm_struct *mm,
835 					      unsigned long start,
836 					      unsigned long end)
837 {
838 	trace_kvm_age_hva(start, end);
839 
840 	return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
841 				 !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
842 }
843 
844 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
845 					struct mm_struct *mm,
846 					unsigned long start,
847 					unsigned long end)
848 {
849 	trace_kvm_age_hva(start, end);
850 
851 	/*
852 	 * Even though we do not flush TLB, this will still adversely
853 	 * affect performance on pre-Haswell Intel EPT, where there is
854 	 * no EPT Access Bit to clear so that we have to tear down EPT
855 	 * tables instead. If we find this unacceptable, we can always
856 	 * add a parameter to kvm_age_hva so that it effectively doesn't
857 	 * do anything on clear_young.
858 	 *
859 	 * Also note that currently we never issue secondary TLB flushes
860 	 * from clear_young, leaving this job up to the regular system
861 	 * cadence. If we find this inaccurate, we might come up with a
862 	 * more sophisticated heuristic later.
863 	 */
864 	return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
865 }
866 
867 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
868 				       struct mm_struct *mm,
869 				       unsigned long address)
870 {
871 	trace_kvm_test_age_hva(address);
872 
873 	return kvm_age_hva_range_no_flush(mn, address, address + 1,
874 					  kvm_test_age_gfn);
875 }
876 
877 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
878 				     struct mm_struct *mm)
879 {
880 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
881 	int idx;
882 
883 	idx = srcu_read_lock(&kvm->srcu);
884 	kvm_flush_shadow_all(kvm);
885 	srcu_read_unlock(&kvm->srcu, idx);
886 }
887 
888 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
889 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
890 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
891 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
892 	.clear_young		= kvm_mmu_notifier_clear_young,
893 	.test_young		= kvm_mmu_notifier_test_young,
894 	.release		= kvm_mmu_notifier_release,
895 };
896 
897 static int kvm_init_mmu_notifier(struct kvm *kvm)
898 {
899 	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
900 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
901 }
902 
903 #else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
904 
905 static int kvm_init_mmu_notifier(struct kvm *kvm)
906 {
907 	return 0;
908 }
909 
910 #endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
911 
912 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
913 static int kvm_pm_notifier_call(struct notifier_block *bl,
914 				unsigned long state,
915 				void *unused)
916 {
917 	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
918 
919 	return kvm_arch_pm_notifier(kvm, state);
920 }
921 
922 static void kvm_init_pm_notifier(struct kvm *kvm)
923 {
924 	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
925 	/* Suspend KVM before we suspend ftrace, RCU, etc. */
926 	kvm->pm_notifier.priority = INT_MAX;
927 	register_pm_notifier(&kvm->pm_notifier);
928 }
929 
930 static void kvm_destroy_pm_notifier(struct kvm *kvm)
931 {
932 	unregister_pm_notifier(&kvm->pm_notifier);
933 }
934 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
935 static void kvm_init_pm_notifier(struct kvm *kvm)
936 {
937 }
938 
939 static void kvm_destroy_pm_notifier(struct kvm *kvm)
940 {
941 }
942 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
943 
944 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
945 {
946 	if (!memslot->dirty_bitmap)
947 		return;
948 
949 	vfree(memslot->dirty_bitmap);
950 	memslot->dirty_bitmap = NULL;
951 }
952 
953 /* This does not remove the slot from struct kvm_memslots data structures */
954 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
955 {
956 	if (slot->flags & KVM_MEM_GUEST_MEMFD)
957 		kvm_gmem_unbind(slot);
958 
959 	kvm_destroy_dirty_bitmap(slot);
960 
961 	kvm_arch_free_memslot(kvm, slot);
962 
963 	kfree(slot);
964 }
965 
966 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
967 {
968 	struct hlist_node *idnode;
969 	struct kvm_memory_slot *memslot;
970 	int bkt;
971 
972 	/*
973 	 * The same memslot objects live in both active and inactive sets,
974 	 * arbitrarily free using index '1' so the second invocation of this
975 	 * function isn't operating over a structure with dangling pointers
976 	 * (even though this function isn't actually touching them).
977 	 */
978 	if (!slots->node_idx)
979 		return;
980 
981 	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
982 		kvm_free_memslot(kvm, memslot);
983 }
984 
985 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
986 {
987 	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
988 	case KVM_STATS_TYPE_INSTANT:
989 		return 0444;
990 	case KVM_STATS_TYPE_CUMULATIVE:
991 	case KVM_STATS_TYPE_PEAK:
992 	default:
993 		return 0644;
994 	}
995 }
996 
997 
998 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
999 {
1000 	int i;
1001 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1002 				      kvm_vcpu_stats_header.num_desc;
1003 
1004 	if (IS_ERR(kvm->debugfs_dentry))
1005 		return;
1006 
1007 	debugfs_remove_recursive(kvm->debugfs_dentry);
1008 
1009 	if (kvm->debugfs_stat_data) {
1010 		for (i = 0; i < kvm_debugfs_num_entries; i++)
1011 			kfree(kvm->debugfs_stat_data[i]);
1012 		kfree(kvm->debugfs_stat_data);
1013 	}
1014 }
1015 
1016 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1017 {
1018 	static DEFINE_MUTEX(kvm_debugfs_lock);
1019 	struct dentry *dent;
1020 	char dir_name[ITOA_MAX_LEN * 2];
1021 	struct kvm_stat_data *stat_data;
1022 	const struct _kvm_stats_desc *pdesc;
1023 	int i, ret = -ENOMEM;
1024 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1025 				      kvm_vcpu_stats_header.num_desc;
1026 
1027 	if (!debugfs_initialized())
1028 		return 0;
1029 
1030 	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1031 	mutex_lock(&kvm_debugfs_lock);
1032 	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1033 	if (dent) {
1034 		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1035 		dput(dent);
1036 		mutex_unlock(&kvm_debugfs_lock);
1037 		return 0;
1038 	}
1039 	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1040 	mutex_unlock(&kvm_debugfs_lock);
1041 	if (IS_ERR(dent))
1042 		return 0;
1043 
1044 	kvm->debugfs_dentry = dent;
1045 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1046 					 sizeof(*kvm->debugfs_stat_data),
1047 					 GFP_KERNEL_ACCOUNT);
1048 	if (!kvm->debugfs_stat_data)
1049 		goto out_err;
1050 
1051 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1052 		pdesc = &kvm_vm_stats_desc[i];
1053 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1054 		if (!stat_data)
1055 			goto out_err;
1056 
1057 		stat_data->kvm = kvm;
1058 		stat_data->desc = pdesc;
1059 		stat_data->kind = KVM_STAT_VM;
1060 		kvm->debugfs_stat_data[i] = stat_data;
1061 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1062 				    kvm->debugfs_dentry, stat_data,
1063 				    &stat_fops_per_vm);
1064 	}
1065 
1066 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1067 		pdesc = &kvm_vcpu_stats_desc[i];
1068 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1069 		if (!stat_data)
1070 			goto out_err;
1071 
1072 		stat_data->kvm = kvm;
1073 		stat_data->desc = pdesc;
1074 		stat_data->kind = KVM_STAT_VCPU;
1075 		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1076 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1077 				    kvm->debugfs_dentry, stat_data,
1078 				    &stat_fops_per_vm);
1079 	}
1080 
1081 	kvm_arch_create_vm_debugfs(kvm);
1082 	return 0;
1083 out_err:
1084 	kvm_destroy_vm_debugfs(kvm);
1085 	return ret;
1086 }
1087 
1088 /*
1089  * Called just after removing the VM from the vm_list, but before doing any
1090  * other destruction.
1091  */
1092 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1093 {
1094 }
1095 
1096 /*
1097  * Called after the per-VM debugfs directory is created.  When called,
1098  * kvm->debugfs_dentry should already be set up, so arch-specific debugfs
1099  * entries can be created under it.  Cleanup is handled recursively by
1100  * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
1101  */
1102 void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1103 {
1104 }
1105 
1106 /* Called only on cleanup and destruction paths when there are no users. */
1107 static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
1108 							     enum kvm_bus idx)
1109 {
1110 	return rcu_dereference_protected(kvm->buses[idx],
1111 					 !refcount_read(&kvm->users_count));
1112 }
1113 
1114 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1115 {
1116 	struct kvm *kvm = kvm_arch_alloc_vm();
1117 	struct kvm_memslots *slots;
1118 	int r, i, j;
1119 
1120 	if (!kvm)
1121 		return ERR_PTR(-ENOMEM);
1122 
1123 	KVM_MMU_LOCK_INIT(kvm);
1124 	mmgrab(current->mm);
1125 	kvm->mm = current->mm;
1126 	kvm_eventfd_init(kvm);
1127 	mutex_init(&kvm->lock);
1128 	mutex_init(&kvm->irq_lock);
1129 	mutex_init(&kvm->slots_lock);
1130 	mutex_init(&kvm->slots_arch_lock);
1131 	spin_lock_init(&kvm->mn_invalidate_lock);
1132 	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1133 	xa_init(&kvm->vcpu_array);
1134 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1135 	xa_init(&kvm->mem_attr_array);
1136 #endif
1137 
1138 	INIT_LIST_HEAD(&kvm->gpc_list);
1139 	spin_lock_init(&kvm->gpc_lock);
1140 
1141 	INIT_LIST_HEAD(&kvm->devices);
1142 	kvm->max_vcpus = KVM_MAX_VCPUS;
1143 
1144 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1145 
1146 	/*
1147 	 * Force subsequent debugfs file creations to fail if the VM directory
1148 	 * is not created (by kvm_create_vm_debugfs()).
1149 	 */
1150 	kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1151 
1152 	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1153 		 task_pid_nr(current));
1154 
1155 	r = -ENOMEM;
1156 	if (init_srcu_struct(&kvm->srcu))
1157 		goto out_err_no_srcu;
1158 	if (init_srcu_struct(&kvm->irq_srcu))
1159 		goto out_err_no_irq_srcu;
1160 
1161 	r = kvm_init_irq_routing(kvm);
1162 	if (r)
1163 		goto out_err_no_irq_routing;
1164 
1165 	refcount_set(&kvm->users_count, 1);
1166 
1167 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1168 		for (j = 0; j < 2; j++) {
1169 			slots = &kvm->__memslots[i][j];
1170 
1171 			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1172 			slots->hva_tree = RB_ROOT_CACHED;
1173 			slots->gfn_tree = RB_ROOT;
1174 			hash_init(slots->id_hash);
1175 			slots->node_idx = j;
1176 
1177 			/* Generations must be different for each address space. */
1178 			slots->generation = i;
1179 		}
1180 
1181 		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1182 	}
1183 
1184 	r = -ENOMEM;
1185 	for (i = 0; i < KVM_NR_BUSES; i++) {
1186 		rcu_assign_pointer(kvm->buses[i],
1187 			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1188 		if (!kvm->buses[i])
1189 			goto out_err_no_arch_destroy_vm;
1190 	}
1191 
1192 	r = kvm_arch_init_vm(kvm, type);
1193 	if (r)
1194 		goto out_err_no_arch_destroy_vm;
1195 
1196 	r = kvm_enable_virtualization();
1197 	if (r)
1198 		goto out_err_no_disable;
1199 
1200 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1201 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1202 #endif
1203 
1204 	r = kvm_init_mmu_notifier(kvm);
1205 	if (r)
1206 		goto out_err_no_mmu_notifier;
1207 
1208 	r = kvm_coalesced_mmio_init(kvm);
1209 	if (r < 0)
1210 		goto out_no_coalesced_mmio;
1211 
1212 	r = kvm_create_vm_debugfs(kvm, fdname);
1213 	if (r)
1214 		goto out_err_no_debugfs;
1215 
1216 	mutex_lock(&kvm_lock);
1217 	list_add(&kvm->vm_list, &vm_list);
1218 	mutex_unlock(&kvm_lock);
1219 
1220 	preempt_notifier_inc();
1221 	kvm_init_pm_notifier(kvm);
1222 
1223 	return kvm;
1224 
1225 out_err_no_debugfs:
1226 	kvm_coalesced_mmio_free(kvm);
1227 out_no_coalesced_mmio:
1228 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1229 	if (kvm->mmu_notifier.ops)
1230 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1231 #endif
1232 out_err_no_mmu_notifier:
1233 	kvm_disable_virtualization();
1234 out_err_no_disable:
1235 	kvm_arch_destroy_vm(kvm);
1236 out_err_no_arch_destroy_vm:
1237 	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1238 	for (i = 0; i < KVM_NR_BUSES; i++)
1239 		kfree(kvm_get_bus_for_destruction(kvm, i));
1240 	kvm_free_irq_routing(kvm);
1241 out_err_no_irq_routing:
1242 	cleanup_srcu_struct(&kvm->irq_srcu);
1243 out_err_no_irq_srcu:
1244 	cleanup_srcu_struct(&kvm->srcu);
1245 out_err_no_srcu:
1246 	kvm_arch_free_vm(kvm);
1247 	mmdrop(current->mm);
1248 	return ERR_PTR(r);
1249 }
1250 
1251 static void kvm_destroy_devices(struct kvm *kvm)
1252 {
1253 	struct kvm_device *dev, *tmp;
1254 
1255 	/*
1256 	 * We do not need to take the kvm->lock here, because nobody else
1257 	 * has a reference to the struct kvm at this point and therefore
1258 	 * cannot access the devices list anyhow.
1259 	 *
1260 	 * The device list is generally managed as an rculist, but list_del()
1261 	 * is used intentionally here. If a bug in KVM introduced a reader that
1262 	 * was not backed by a reference on the kvm struct, the hope is that
1263 	 * it'd consume the poisoned forward pointer instead of suffering a
1264 	 * use-after-free, even though this cannot be guaranteed.
1265 	 */
1266 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1267 		list_del(&dev->vm_node);
1268 		dev->ops->destroy(dev);
1269 	}
1270 }
1271 
1272 static void kvm_destroy_vm(struct kvm *kvm)
1273 {
1274 	int i;
1275 	struct mm_struct *mm = kvm->mm;
1276 
1277 	kvm_destroy_pm_notifier(kvm);
1278 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1279 	kvm_destroy_vm_debugfs(kvm);
1280 	mutex_lock(&kvm_lock);
1281 	list_del(&kvm->vm_list);
1282 	mutex_unlock(&kvm_lock);
1283 	kvm_arch_pre_destroy_vm(kvm);
1284 
1285 	kvm_free_irq_routing(kvm);
1286 	for (i = 0; i < KVM_NR_BUSES; i++) {
1287 		struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
1288 
1289 		if (bus)
1290 			kvm_io_bus_destroy(bus);
1291 		kvm->buses[i] = NULL;
1292 	}
1293 	kvm_coalesced_mmio_free(kvm);
1294 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1295 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1296 	/*
1297 	 * At this point, pending calls to invalidate_range_start()
1298 	 * have completed but no more MMU notifiers will run, so
1299 	 * mn_active_invalidate_count may remain unbalanced.
1300 	 * No threads can be waiting in kvm_swap_active_memslots() as the
1301 	 * last reference on KVM has been dropped, but freeing
1302 	 * memslots would deadlock without this manual intervention.
1303 	 *
1304 	 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1305 	 * notifier between a start() and end(), then there shouldn't be any
1306 	 * in-progress invalidations.
1307 	 */
1308 	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1309 	if (kvm->mn_active_invalidate_count)
1310 		kvm->mn_active_invalidate_count = 0;
1311 	else
1312 		WARN_ON(kvm->mmu_invalidate_in_progress);
1313 #else
1314 	kvm_flush_shadow_all(kvm);
1315 #endif
1316 	kvm_arch_destroy_vm(kvm);
1317 	kvm_destroy_devices(kvm);
1318 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1319 		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1320 		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1321 	}
1322 	cleanup_srcu_struct(&kvm->irq_srcu);
1323 	srcu_barrier(&kvm->srcu);
1324 	cleanup_srcu_struct(&kvm->srcu);
1325 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1326 	xa_destroy(&kvm->mem_attr_array);
1327 #endif
1328 	kvm_arch_free_vm(kvm);
1329 	preempt_notifier_dec();
1330 	kvm_disable_virtualization();
1331 	mmdrop(mm);
1332 }
1333 
1334 void kvm_get_kvm(struct kvm *kvm)
1335 {
1336 	refcount_inc(&kvm->users_count);
1337 }
1338 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1339 
1340 /*
1341  * Make sure the VM is not in the middle of destruction; this is a safe version
1342  * of kvm_get_kvm().  Return true if kvm was referenced successfully, false otherwise.
1343  */
1344 bool kvm_get_kvm_safe(struct kvm *kvm)
1345 {
1346 	return refcount_inc_not_zero(&kvm->users_count);
1347 }
1348 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
1349 
1350 void kvm_put_kvm(struct kvm *kvm)
1351 {
1352 	if (refcount_dec_and_test(&kvm->users_count))
1353 		kvm_destroy_vm(kvm);
1354 }
1355 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1356 
1357 /*
1358  * Used to put a reference that was taken on behalf of an object associated
1359  * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1360  * of the new file descriptor fails and the reference cannot be transferred to
1361  * its final owner.  In such cases, the caller is still actively using @kvm and
1362  * will fail miserably if the refcount unexpectedly hits zero.
1363  */
1364 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1365 {
1366 	WARN_ON(refcount_dec_and_test(&kvm->users_count));
1367 }
1368 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);
1369 
1370 static int kvm_vm_release(struct inode *inode, struct file *filp)
1371 {
1372 	struct kvm *kvm = filp->private_data;
1373 
1374 	kvm_irqfd_release(kvm);
1375 
1376 	kvm_put_kvm(kvm);
1377 	return 0;
1378 }
1379 
1380 int kvm_trylock_all_vcpus(struct kvm *kvm)
1381 {
1382 	struct kvm_vcpu *vcpu;
1383 	unsigned long i, j;
1384 
1385 	lockdep_assert_held(&kvm->lock);
1386 
1387 	kvm_for_each_vcpu(i, vcpu, kvm)
1388 		if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
1389 			goto out_unlock;
1390 	return 0;
1391 
1392 out_unlock:
1393 	kvm_for_each_vcpu(j, vcpu, kvm) {
1394 		if (i == j)
1395 			break;
1396 		mutex_unlock(&vcpu->mutex);
1397 	}
1398 	return -EINTR;
1399 }
1400 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_trylock_all_vcpus);
1401 
1402 int kvm_lock_all_vcpus(struct kvm *kvm)
1403 {
1404 	struct kvm_vcpu *vcpu;
1405 	unsigned long i, j;
1406 	int r;
1407 
1408 	lockdep_assert_held(&kvm->lock);
1409 
1410 	kvm_for_each_vcpu(i, vcpu, kvm) {
1411 		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
1412 		if (r)
1413 			goto out_unlock;
1414 	}
1415 	return 0;
1416 
1417 out_unlock:
1418 	kvm_for_each_vcpu(j, vcpu, kvm) {
1419 		if (i == j)
1420 			break;
1421 		mutex_unlock(&vcpu->mutex);
1422 	}
1423 	return r;
1424 }
1425 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lock_all_vcpus);
1426 
1427 void kvm_unlock_all_vcpus(struct kvm *kvm)
1428 {
1429 	struct kvm_vcpu *vcpu;
1430 	unsigned long i;
1431 
1432 	lockdep_assert_held(&kvm->lock);
1433 
1434 	kvm_for_each_vcpu(i, vcpu, kvm)
1435 		mutex_unlock(&vcpu->mutex);
1436 }
1437 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_unlock_all_vcpus);
1438 
1439 /*
1440  * Allocation size is twice as large as the actual dirty bitmap size.
1441  * See kvm_vm_ioctl_get_dirty_log() why this is needed.
1442  */
1443 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1444 {
1445 	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1446 
1447 	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1448 	if (!memslot->dirty_bitmap)
1449 		return -ENOMEM;
1450 
1451 	return 0;
1452 }
1453 
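/*
 * Return the inactive memslot set for @as_id, i.e. the instance of
 * kvm->__memslots[as_id][] that is not currently published via
 * kvm->memslots[as_id].  Each instance keeps a fixed node_idx (0 or 1);
 * kvm_swap_active_memslots() flips which instance is active.
 */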
1454 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1455 {
1456 	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1457 	int node_idx_inactive = active->node_idx ^ 1;
1458 
1459 	return &kvm->__memslots[as_id][node_idx_inactive];
1460 }
1461 
1462 /*
1463  * Helper to get the address space ID when one of memslot pointers may be NULL.
1464  * This also serves as a sanity check that at least one of the pointers is non-NULL,
1465  * and that their address space IDs don't diverge.
1466  */
1467 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1468 				  struct kvm_memory_slot *b)
1469 {
1470 	if (WARN_ON_ONCE(!a && !b))
1471 		return 0;
1472 
1473 	if (!a)
1474 		return b->as_id;
1475 	if (!b)
1476 		return a->as_id;
1477 
1478 	WARN_ON_ONCE(a->as_id != b->as_id);
1479 	return a->as_id;
1480 }
1481 
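/*
 * Insert @slot into the gfn tree of @slots, ordered by base_gfn.  Memslots
 * must not overlap, so two slots with the same base_gfn would be a KVM bug
 * (hence the BUG() below).
 */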
1482 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1483 				struct kvm_memory_slot *slot)
1484 {
1485 	struct rb_root *gfn_tree = &slots->gfn_tree;
1486 	struct rb_node **node, *parent;
1487 	int idx = slots->node_idx;
1488 
1489 	parent = NULL;
1490 	for (node = &gfn_tree->rb_node; *node; ) {
1491 		struct kvm_memory_slot *tmp;
1492 
1493 		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1494 		parent = *node;
1495 		if (slot->base_gfn < tmp->base_gfn)
1496 			node = &(*node)->rb_left;
1497 		else if (slot->base_gfn > tmp->base_gfn)
1498 			node = &(*node)->rb_right;
1499 		else
1500 			BUG();
1501 	}
1502 
1503 	rb_link_node(&slot->gfn_node[idx], parent, node);
1504 	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1505 }
1506 
1507 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1508 			       struct kvm_memory_slot *slot)
1509 {
1510 	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1511 }
1512 
1513 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1514 				 struct kvm_memory_slot *old,
1515 				 struct kvm_memory_slot *new)
1516 {
1517 	int idx = slots->node_idx;
1518 
1519 	WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1520 
1521 	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1522 			&slots->gfn_tree);
1523 }
1524 
1525 /*
1526  * Replace @old with @new in the inactive memslots.
1527  *
1528  * With NULL @old this simply adds @new.
1529  * With NULL @new this simply removes @old.
1530  *
1531  * If @new is non-NULL its hva_node[slots_idx] range has to be set
1532  * appropriately.
1533  */
1534 static void kvm_replace_memslot(struct kvm *kvm,
1535 				struct kvm_memory_slot *old,
1536 				struct kvm_memory_slot *new)
1537 {
1538 	int as_id = kvm_memslots_get_as_id(old, new);
1539 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1540 	int idx = slots->node_idx;
1541 
1542 	if (old) {
1543 		hash_del(&old->id_node[idx]);
1544 		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1545 
1546 		if ((long)old == atomic_long_read(&slots->last_used_slot))
1547 			atomic_long_set(&slots->last_used_slot, (long)new);
1548 
1549 		if (!new) {
1550 			kvm_erase_gfn_node(slots, old);
1551 			return;
1552 		}
1553 	}
1554 
1555 	/*
1556 	 * Initialize @new's hva range.  Do this even when replacing an @old
1557 	 * slot, as kvm_copy_memslot() deliberately does not touch node data.
1558 	 */
1559 	new->hva_node[idx].start = new->userspace_addr;
1560 	new->hva_node[idx].last = new->userspace_addr +
1561 				  (new->npages << PAGE_SHIFT) - 1;
1562 
1563 	/*
1564 	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
1565 	 * hva_node needs to be swapped with remove+insert even though hva can't
1566 	 * change when replacing an existing slot.
1567 	 */
1568 	hash_add(slots->id_hash, &new->id_node[idx], new->id);
1569 	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1570 
1571 	/*
1572 	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1573 	 * switch the node in the gfn tree instead of removing the old and
1574 	 * inserting the new as two separate operations. Replacement is a
1575 	 * single O(1) operation versus two O(log(n)) operations for
1576 	 * remove+insert.
1577 	 */
1578 	if (old && old->base_gfn == new->base_gfn) {
1579 		kvm_replace_gfn_node(slots, old, new);
1580 	} else {
1581 		if (old)
1582 			kvm_erase_gfn_node(slots, old);
1583 		kvm_insert_gfn_node(slots, new);
1584 	}
1585 }
1586 
1587 /*
1588  * Flags that do not access any of the extra space of struct
1589  * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1590  * only allows these.
1591  */
1592 #define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1593 	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1594 
1595 static int check_memory_region_flags(struct kvm *kvm,
1596 				     const struct kvm_userspace_memory_region2 *mem)
1597 {
1598 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1599 
1600 	if (IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
1601 		valid_flags |= KVM_MEM_GUEST_MEMFD;
1602 
1603 	/* Dirty logging of private memory is not currently supported. */
1604 	if (mem->flags & KVM_MEM_GUEST_MEMFD)
1605 		valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1606 
1607 	/*
1608 	 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1609 	 * read-only memslots have emulated MMIO, not page fault, semantics,
1610 	 * and KVM doesn't allow emulated MMIO for private memory.
1611 	 */
1612 	if (kvm_arch_has_readonly_mem(kvm) &&
1613 	    !(mem->flags & KVM_MEM_GUEST_MEMFD))
1614 		valid_flags |= KVM_MEM_READONLY;
1615 
1616 	if (mem->flags & ~valid_flags)
1617 		return -EINVAL;
1618 
1619 	return 0;
1620 }
1621 
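/*
 * Publish the inactive memslot set for @as_id as the new active set: wait for
 * any in-flight mmu_notifier invalidations, install the new pointer, drop
 * slots_arch_lock (taken by the caller in kvm_set_memslot()) before the
 * expedited SRCU synchronization, and finally update the memslot generation.
 */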
1622 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1623 {
1624 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1625 
1626 	/* Grab the generation from the activate memslots. */
1627 	u64 gen = __kvm_memslots(kvm, as_id)->generation;
1628 
1629 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1630 	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1631 
1632 	/*
1633 	 * Do not store the new memslots while there are invalidations in
1634 	 * progress, otherwise the locking in invalidate_range_start and
1635 	 * invalidate_range_end will be unbalanced.
1636 	 */
1637 	spin_lock(&kvm->mn_invalidate_lock);
1638 	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1639 	while (kvm->mn_active_invalidate_count) {
1640 		set_current_state(TASK_UNINTERRUPTIBLE);
1641 		spin_unlock(&kvm->mn_invalidate_lock);
1642 		schedule();
1643 		spin_lock(&kvm->mn_invalidate_lock);
1644 	}
1645 	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1646 	rcu_assign_pointer(kvm->memslots[as_id], slots);
1647 	spin_unlock(&kvm->mn_invalidate_lock);
1648 
1649 	/*
1650 	 * Acquired in kvm_set_memslot. Must be released before synchronize
1651 	 * SRCU below in order to avoid deadlock with another thread
1652 	 * acquiring the slots_arch_lock in an srcu critical section.
1653 	 */
1654 	mutex_unlock(&kvm->slots_arch_lock);
1655 
1656 	synchronize_srcu_expedited(&kvm->srcu);
1657 
1658 	/*
1659 	 * Increment the new memslot generation a second time, dropping the
1660 	 * update in-progress flag and incrementing the generation based on
1661 	 * the number of address spaces.  This provides a unique and easily
1662 	 * identifiable generation number while the memslots are in flux.
1663 	 */
1664 	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1665 
1666 	/*
1667 	 * Generations must be unique even across address spaces.  We do not need
1668 	 * a global counter for that, instead the generation space is evenly split
1669 	 * across address spaces.  For example, with two address spaces, address
1670 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1671 	 * use generations 1, 3, 5, ...
1672 	 */
1673 	gen += kvm_arch_nr_memslot_as_ids(kvm);
1674 
1675 	kvm_arch_memslots_updated(kvm, gen);
1676 
1677 	slots->generation = gen;
1678 }
1679 
1680 static int kvm_prepare_memory_region(struct kvm *kvm,
1681 				     const struct kvm_memory_slot *old,
1682 				     struct kvm_memory_slot *new,
1683 				     enum kvm_mr_change change)
1684 {
1685 	int r;
1686 
1687 	/*
1688 	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1689 	 * will be freed on "commit".  If logging is enabled in both old and
1690 	 * new, reuse the existing bitmap.  If logging is enabled only in the
1691 	 * new and KVM isn't using a ring buffer, allocate and initialize a
1692 	 * new bitmap.
1693 	 */
1694 	if (change != KVM_MR_DELETE) {
1695 		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1696 			new->dirty_bitmap = NULL;
1697 		else if (old && old->dirty_bitmap)
1698 			new->dirty_bitmap = old->dirty_bitmap;
1699 		else if (kvm_use_dirty_bitmap(kvm)) {
1700 			r = kvm_alloc_dirty_bitmap(new);
1701 			if (r)
1702 				return r;
1703 
1704 			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1705 				bitmap_set(new->dirty_bitmap, 0, new->npages);
1706 		}
1707 	}
1708 
1709 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1710 
1711 	/* Free the bitmap on failure if it was allocated above. */
1712 	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1713 		kvm_destroy_dirty_bitmap(new);
1714 
1715 	return r;
1716 }
1717 
1718 static void kvm_commit_memory_region(struct kvm *kvm,
1719 				     struct kvm_memory_slot *old,
1720 				     const struct kvm_memory_slot *new,
1721 				     enum kvm_mr_change change)
1722 {
1723 	int old_flags = old ? old->flags : 0;
1724 	int new_flags = new ? new->flags : 0;
1725 	/*
1726 	 * Update the total number of memslot pages before calling the arch
1727 	 * hook so that architectures can consume the result directly.
1728 	 */
1729 	if (change == KVM_MR_DELETE)
1730 		kvm->nr_memslot_pages -= old->npages;
1731 	else if (change == KVM_MR_CREATE)
1732 		kvm->nr_memslot_pages += new->npages;
1733 
1734 	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1735 		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1736 		atomic_set(&kvm->nr_memslots_dirty_logging,
1737 			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1738 	}
1739 
1740 	kvm_arch_commit_memory_region(kvm, old, new, change);
1741 
1742 	switch (change) {
1743 	case KVM_MR_CREATE:
1744 		/* Nothing more to do. */
1745 		break;
1746 	case KVM_MR_DELETE:
1747 		/* Free the old memslot and all its metadata. */
1748 		kvm_free_memslot(kvm, old);
1749 		break;
1750 	case KVM_MR_MOVE:
1751 	case KVM_MR_FLAGS_ONLY:
1752 		/*
1753 		 * Free the dirty bitmap as needed; the below check encompasses
1754 		 * both the flags and whether a ring buffer is being used.
1755 		 */
1756 		if (old->dirty_bitmap && !new->dirty_bitmap)
1757 			kvm_destroy_dirty_bitmap(old);
1758 
1759 		/*
1760 		 * The final quirk.  Free the detached, old slot, but only its
1761 		 * memory, not any metadata.  Metadata, including arch specific
1762 		 * data, may be reused by @new.
1763 		 */
1764 		kfree(old);
1765 		break;
1766 	default:
1767 		BUG();
1768 	}
1769 }
1770 
1771 /*
1772  * Activate @new, which must be installed in the inactive slots by the caller,
1773  * by swapping the active slots and then propagating @new to @old once @old is
1774  * unreachable and can be safely modified.
1775  *
1776  * With NULL @old this simply adds @new to @active (while swapping the sets).
1777  * With NULL @new this simply removes @old from @active and frees it
1778  * (while also swapping the sets).
1779  */
1780 static void kvm_activate_memslot(struct kvm *kvm,
1781 				 struct kvm_memory_slot *old,
1782 				 struct kvm_memory_slot *new)
1783 {
1784 	int as_id = kvm_memslots_get_as_id(old, new);
1785 
1786 	kvm_swap_active_memslots(kvm, as_id);
1787 
1788 	/* Propagate the new memslot to the now inactive memslots. */
1789 	kvm_replace_memslot(kvm, old, new);
1790 }
1791 
1792 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1793 			     const struct kvm_memory_slot *src)
1794 {
1795 	dest->base_gfn = src->base_gfn;
1796 	dest->npages = src->npages;
1797 	dest->dirty_bitmap = src->dirty_bitmap;
1798 	dest->arch = src->arch;
1799 	dest->userspace_addr = src->userspace_addr;
1800 	dest->flags = src->flags;
1801 	dest->id = src->id;
1802 	dest->as_id = src->as_id;
1803 }
1804 
1805 static void kvm_invalidate_memslot(struct kvm *kvm,
1806 				   struct kvm_memory_slot *old,
1807 				   struct kvm_memory_slot *invalid_slot)
1808 {
1809 	/*
1810 	 * Mark the current slot INVALID.  As with all memslot modifications,
1811 	 * this must be done on an unreachable slot to avoid modifying the
1812 	 * current slot in the active tree.
1813 	 */
1814 	kvm_copy_memslot(invalid_slot, old);
1815 	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1816 	kvm_replace_memslot(kvm, old, invalid_slot);
1817 
1818 	/*
1819 	 * Activate the slot that is now marked INVALID, but don't propagate
1820 	 * the slot to the now inactive slots. The slot is either going to be
1821 	 * deleted or recreated as a new slot.
1822 	 */
1823 	kvm_swap_active_memslots(kvm, old->as_id);
1824 
1825 	/*
1826 	 * From this point no new shadow pages pointing to a deleted, or moved,
1827 	 * memslot will be created.  Validation of sp->gfn happens in:
1828 	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1829 	 *	- kvm_is_visible_gfn (mmu_check_root)
1830 	 */
1831 	kvm_arch_flush_shadow_memslot(kvm, old);
1832 	kvm_arch_guest_memory_reclaimed(kvm);
1833 
1834 	/* Was released by kvm_swap_active_memslots(), reacquire. */
1835 	mutex_lock(&kvm->slots_arch_lock);
1836 
1837 	/*
1838 	 * Copy the arch-specific field of the newly-installed slot back to the
1839 	 * old slot as the arch data could have changed between releasing
1840 	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1841 	 * above.  Writers are required to retrieve memslots *after* acquiring
1842 	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1843 	 */
1844 	old->arch = invalid_slot->arch;
1845 }
1846 
1847 static void kvm_create_memslot(struct kvm *kvm,
1848 			       struct kvm_memory_slot *new)
1849 {
1850 	/* Add the new memslot to the inactive set and activate. */
1851 	kvm_replace_memslot(kvm, NULL, new);
1852 	kvm_activate_memslot(kvm, NULL, new);
1853 }
1854 
1855 static void kvm_delete_memslot(struct kvm *kvm,
1856 			       struct kvm_memory_slot *old,
1857 			       struct kvm_memory_slot *invalid_slot)
1858 {
1859 	/*
1860 	 * Remove the old memslot (in the inactive memslots) by passing NULL as
1861 	 * the "new" slot, then do the same for the INVALID version in the active slots.
1862 	 */
1863 	kvm_replace_memslot(kvm, old, NULL);
1864 	kvm_activate_memslot(kvm, invalid_slot, NULL);
1865 }
1866 
1867 static void kvm_move_memslot(struct kvm *kvm,
1868 			     struct kvm_memory_slot *old,
1869 			     struct kvm_memory_slot *new,
1870 			     struct kvm_memory_slot *invalid_slot)
1871 {
1872 	/*
1873 	 * Replace the old memslot in the inactive slots, and then swap slots
1874 	 * and replace the current INVALID with the new as well.
1875 	 */
1876 	kvm_replace_memslot(kvm, old, new);
1877 	kvm_activate_memslot(kvm, invalid_slot, new);
1878 }
1879 
1880 static void kvm_update_flags_memslot(struct kvm *kvm,
1881 				     struct kvm_memory_slot *old,
1882 				     struct kvm_memory_slot *new)
1883 {
1884 	/*
1885 	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1886 	 * an intermediate step. Instead, the old memslot is simply replaced
1887 	 * with a new, updated copy in both memslot sets.
1888 	 */
1889 	kvm_replace_memslot(kvm, old, new);
1890 	kvm_activate_memslot(kvm, old, new);
1891 }
1892 
1893 static int kvm_set_memslot(struct kvm *kvm,
1894 			   struct kvm_memory_slot *old,
1895 			   struct kvm_memory_slot *new,
1896 			   enum kvm_mr_change change)
1897 {
1898 	struct kvm_memory_slot *invalid_slot;
1899 	int r;
1900 
1901 	/*
1902 	 * Released in kvm_swap_active_memslots().
1903 	 *
1904 	 * Must be held from before the current memslots are copied until after
1905 	 * the new memslots are installed with rcu_assign_pointer, then
1906 	 * released before the synchronize srcu in kvm_swap_active_memslots().
1907 	 *
1908 	 * When modifying memslots outside of the slots_lock, must be held
1909 	 * before reading the pointer to the current memslots until after all
1910 	 * changes to those memslots are complete.
1911 	 *
1912 	 * These rules ensure that installing new memslots does not lose
1913 	 * changes made to the previous memslots.
1914 	 */
1915 	mutex_lock(&kvm->slots_arch_lock);
1916 
1917 	/*
1918 	 * Invalidate the old slot if it's being deleted or moved.  This is
1919 	 * done prior to actually deleting/moving the memslot to allow vCPUs to
1920 	 * continue running by ensuring there are no mappings or shadow pages
1921 	 * for the memslot when it is deleted/moved.  Without pre-invalidation
1922 	 * (and without a lock), a window would exist between effecting the
1923 	 * delete/move and committing the changes in arch code where KVM or a
1924 	 * guest could access a non-existent memslot.
1925 	 *
1926 	 * Modifications are done on a temporary, unreachable slot.  The old
1927 	 * slot needs to be preserved in case a later step fails and the
1928 	 * invalidation needs to be reverted.
1929 	 */
1930 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1931 		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1932 		if (!invalid_slot) {
1933 			mutex_unlock(&kvm->slots_arch_lock);
1934 			return -ENOMEM;
1935 		}
1936 		kvm_invalidate_memslot(kvm, old, invalid_slot);
1937 	}
1938 
1939 	r = kvm_prepare_memory_region(kvm, old, new, change);
1940 	if (r) {
1941 		/*
1942 		 * For DELETE/MOVE, revert the above INVALID change.  No
1943 		 * modifications required since the original slot was preserved
1944 		 * in the inactive slots.  Changing the active memslots also
1945 		 * releases slots_arch_lock.
1946 		 */
1947 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1948 			kvm_activate_memslot(kvm, invalid_slot, old);
1949 			kfree(invalid_slot);
1950 		} else {
1951 			mutex_unlock(&kvm->slots_arch_lock);
1952 		}
1953 		return r;
1954 	}
1955 
1956 	/*
1957 	 * For DELETE and MOVE, the temporary invalid_slot is now active as the
1958 	 * INVALID version of the old slot.  MOVE is particularly special as it
1959 	 * reuses the old slot and returns a copy of the old slot (in invalid_slot).
1960 	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1961 	 * old slot is detached but otherwise preserved.
1962 	 */
1963 	if (change == KVM_MR_CREATE)
1964 		kvm_create_memslot(kvm, new);
1965 	else if (change == KVM_MR_DELETE)
1966 		kvm_delete_memslot(kvm, old, invalid_slot);
1967 	else if (change == KVM_MR_MOVE)
1968 		kvm_move_memslot(kvm, old, new, invalid_slot);
1969 	else if (change == KVM_MR_FLAGS_ONLY)
1970 		kvm_update_flags_memslot(kvm, old, new);
1971 	else
1972 		BUG();
1973 
1974 	/* Free the temporary INVALID slot used for DELETE and MOVE. */
1975 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1976 		kfree(invalid_slot);
1977 
1978 	/*
1979 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
1980 	 * will directly hit the final, active memslot.  Architectures are
1981 	 * responsible for knowing that new->arch may be stale.
1982 	 */
1983 	kvm_commit_memory_region(kvm, old, new, change);
1984 
1985 	return 0;
1986 }
1987 
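/*
 * Return true if any memslot other than @id overlaps the gfn range
 * [@start, @end) in @slots, i.e. if creating/moving slot @id to that range
 * would collide with an existing slot.
 */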
1988 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1989 				      gfn_t start, gfn_t end)
1990 {
1991 	struct kvm_memslot_iter iter;
1992 
1993 	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1994 		if (iter.slot->id != id)
1995 			return true;
1996 	}
1997 
1998 	return false;
1999 }
2000 
2001 static int kvm_set_memory_region(struct kvm *kvm,
2002 				 const struct kvm_userspace_memory_region2 *mem)
2003 {
2004 	struct kvm_memory_slot *old, *new;
2005 	struct kvm_memslots *slots;
2006 	enum kvm_mr_change change;
2007 	unsigned long npages;
2008 	gfn_t base_gfn;
2009 	int as_id, id;
2010 	int r;
2011 
2012 	lockdep_assert_held(&kvm->slots_lock);
2013 
2014 	r = check_memory_region_flags(kvm, mem);
2015 	if (r)
2016 		return r;
2017 
2018 	as_id = mem->slot >> 16;
2019 	id = (u16)mem->slot;
2020 
2021 	/* General sanity checks */
2022 	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2023 	    (mem->memory_size != (unsigned long)mem->memory_size))
2024 		return -EINVAL;
2025 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2026 		return -EINVAL;
2027 	/* We can read the guest memory with __xxx_user() later on. */
2028 	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2029 	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2030 	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2031 			mem->memory_size))
2032 		return -EINVAL;
2033 	if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2034 	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2035 	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2036 		return -EINVAL;
2037 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2038 		return -EINVAL;
2039 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2040 		return -EINVAL;
2041 
2042 	/*
2043 	 * The size of userspace-defined memory regions is restricted in order
2044 	 * to play nice with dirty bitmap operations, which are indexed with an
2045 	 * "unsigned int".  KVM's internal memory regions don't support dirty
2046 	 * logging, and so are exempt.
2047 	 */
2048 	if (id < KVM_USER_MEM_SLOTS &&
2049 	    (mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2050 		return -EINVAL;
2051 
2052 	slots = __kvm_memslots(kvm, as_id);
2053 
2054 	/*
2055 	 * Note, the old memslot (and the pointer itself!) may be invalidated
2056 	 * and/or destroyed by kvm_set_memslot().
2057 	 */
2058 	old = id_to_memslot(slots, id);
2059 
2060 	if (!mem->memory_size) {
2061 		if (!old || !old->npages)
2062 			return -EINVAL;
2063 
2064 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2065 			return -EIO;
2066 
2067 		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2068 	}
2069 
2070 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2071 	npages = (mem->memory_size >> PAGE_SHIFT);
2072 
2073 	if (!old || !old->npages) {
2074 		change = KVM_MR_CREATE;
2075 
2076 		/*
2077 		 * To simplify KVM internals, the total number of pages across
2078 		 * all memslots must fit in an unsigned long.
2079 		 */
2080 		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2081 			return -EINVAL;
2082 	} else { /* Modify an existing slot. */
2083 		/* Private memslots are immutable, they can only be deleted. */
2084 		if (mem->flags & KVM_MEM_GUEST_MEMFD)
2085 			return -EINVAL;
2086 		if ((mem->userspace_addr != old->userspace_addr) ||
2087 		    (npages != old->npages) ||
2088 		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2089 			return -EINVAL;
2090 
2091 		if (base_gfn != old->base_gfn)
2092 			change = KVM_MR_MOVE;
2093 		else if (mem->flags != old->flags)
2094 			change = KVM_MR_FLAGS_ONLY;
2095 		else /* Nothing to change. */
2096 			return 0;
2097 	}
2098 
2099 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2100 	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2101 		return -EEXIST;
2102 
2103 	/* Allocate a slot that will persist in the memslots. */
2104 	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2105 	if (!new)
2106 		return -ENOMEM;
2107 
2108 	new->as_id = as_id;
2109 	new->id = id;
2110 	new->base_gfn = base_gfn;
2111 	new->npages = npages;
2112 	new->flags = mem->flags;
2113 	new->userspace_addr = mem->userspace_addr;
2114 	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2115 		r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2116 		if (r)
2117 			goto out;
2118 	}
2119 
2120 	r = kvm_set_memslot(kvm, old, new, change);
2121 	if (r)
2122 		goto out_unbind;
2123 
2124 	return 0;
2125 
2126 out_unbind:
2127 	if (mem->flags & KVM_MEM_GUEST_MEMFD)
2128 		kvm_gmem_unbind(new);
2129 out:
2130 	kfree(new);
2131 	return r;
2132 }
2133 
2134 int kvm_set_internal_memslot(struct kvm *kvm,
2135 			     const struct kvm_userspace_memory_region2 *mem)
2136 {
2137 	if (WARN_ON_ONCE(mem->slot < KVM_USER_MEM_SLOTS))
2138 		return -EINVAL;
2139 
2140 	if (WARN_ON_ONCE(mem->flags))
2141 		return -EINVAL;
2142 
2143 	return kvm_set_memory_region(kvm, mem);
2144 }
2145 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_internal_memslot);
2146 
2147 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2148 					  struct kvm_userspace_memory_region2 *mem)
2149 {
2150 	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2151 		return -EINVAL;
2152 
2153 	guard(mutex)(&kvm->slots_lock);
2154 	return kvm_set_memory_region(kvm, mem);
2155 }
2156 
2157 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2158 /**
2159  * kvm_get_dirty_log - get a snapshot of dirty pages
2160  * @kvm:	pointer to kvm instance
2161  * @log:	slot id and address to which we copy the log
2162  * @is_dirty:	set to '1' if any dirty pages were found
2163  * @memslot:	set to the associated memslot, always valid on success
2164  */
2165 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2166 		      int *is_dirty, struct kvm_memory_slot **memslot)
2167 {
2168 	struct kvm_memslots *slots;
2169 	int i, as_id, id;
2170 	unsigned long n;
2171 	unsigned long any = 0;
2172 
2173 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2174 	if (!kvm_use_dirty_bitmap(kvm))
2175 		return -ENXIO;
2176 
2177 	*memslot = NULL;
2178 	*is_dirty = 0;
2179 
2180 	as_id = log->slot >> 16;
2181 	id = (u16)log->slot;
2182 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2183 		return -EINVAL;
2184 
2185 	slots = __kvm_memslots(kvm, as_id);
2186 	*memslot = id_to_memslot(slots, id);
2187 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
2188 		return -ENOENT;
2189 
2190 	kvm_arch_sync_dirty_log(kvm, *memslot);
2191 
2192 	n = kvm_dirty_bitmap_bytes(*memslot);
2193 
2194 	for (i = 0; !any && i < n/sizeof(long); ++i)
2195 		any = (*memslot)->dirty_bitmap[i];
2196 
2197 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2198 		return -EFAULT;
2199 
2200 	if (any)
2201 		*is_dirty = 1;
2202 	return 0;
2203 }
2204 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dirty_log);
2205 
2206 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2207 /**
2208  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2209  *	and reenable dirty page tracking for the corresponding pages.
2210  * @kvm:	pointer to kvm instance
2211  * @log:	slot id and address to which we copy the log
2212  *
2213  * We need to keep in mind that VCPU threads can write to the bitmap
2214  * concurrently. So, to avoid losing track of dirty pages we keep the
2215  * following order:
2216  *
2217  *    1. Take a snapshot of the bit and clear it if needed.
2218  *    2. Write protect the corresponding page.
2219  *    3. Copy the snapshot to the userspace.
2220  *    4. Upon return caller flushes TLB's if needed.
2221  *
2222  * Between 2 and 4, the guest may write to the page using the remaining TLB
2223  * entry.  This is not a problem because the page is reported dirty using
2224  * the snapshot taken before and step 4 ensures that writes done after
2225  * exiting to userspace will be logged for the next call.
2226  *
2227  */
2228 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2229 {
2230 	struct kvm_memslots *slots;
2231 	struct kvm_memory_slot *memslot;
2232 	int i, as_id, id;
2233 	unsigned long n;
2234 	unsigned long *dirty_bitmap;
2235 	unsigned long *dirty_bitmap_buffer;
2236 	bool flush;
2237 
2238 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2239 	if (!kvm_use_dirty_bitmap(kvm))
2240 		return -ENXIO;
2241 
2242 	as_id = log->slot >> 16;
2243 	id = (u16)log->slot;
2244 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2245 		return -EINVAL;
2246 
2247 	slots = __kvm_memslots(kvm, as_id);
2248 	memslot = id_to_memslot(slots, id);
2249 	if (!memslot || !memslot->dirty_bitmap)
2250 		return -ENOENT;
2251 
2252 	dirty_bitmap = memslot->dirty_bitmap;
2253 
2254 	kvm_arch_sync_dirty_log(kvm, memslot);
2255 
2256 	n = kvm_dirty_bitmap_bytes(memslot);
2257 	flush = false;
2258 	if (kvm->manual_dirty_log_protect) {
2259 		/*
2260 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2261 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2262 		 * is some code duplication between this function and
2263 		 * kvm_get_dirty_log, but hopefully all architectures will
2264 		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2265 		 * can be eliminated.
2266 		 */
2267 		dirty_bitmap_buffer = dirty_bitmap;
2268 	} else {
2269 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2270 		memset(dirty_bitmap_buffer, 0, n);
2271 
2272 		KVM_MMU_LOCK(kvm);
2273 		for (i = 0; i < n / sizeof(long); i++) {
2274 			unsigned long mask;
2275 			gfn_t offset;
2276 
2277 			if (!dirty_bitmap[i])
2278 				continue;
2279 
2280 			flush = true;
2281 			mask = xchg(&dirty_bitmap[i], 0);
2282 			dirty_bitmap_buffer[i] = mask;
2283 
2284 			offset = i * BITS_PER_LONG;
2285 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2286 								offset, mask);
2287 		}
2288 		KVM_MMU_UNLOCK(kvm);
2289 	}
2290 
2291 	if (flush)
2292 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2293 
2294 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2295 		return -EFAULT;
2296 	return 0;
2297 }
2298 
2299 
2300 /**
2301  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2302  * @kvm: kvm instance
2303  * @log: slot id and address to which we copy the log
2304  *
2305  * Steps 1-4 below provide general overview of dirty page logging. See
2306  * kvm_get_dirty_log_protect() function description for additional details.
2307  *
2308  * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
2309  * always flush the TLB (step 4) even if a previous step failed and the dirty
2310  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2311  * API does not preclude a subsequent dirty log read by user space. Flushing
2312  * the TLB ensures writes will be marked dirty for the next log read.
2313  *
2314  *   1. Take a snapshot of the bit and clear it if needed.
2315  *   2. Write protect the corresponding page.
2316  *   3. Copy the snapshot to the userspace.
2317  *   4. Flush TLB's if needed.
2318  */
2319 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2320 				      struct kvm_dirty_log *log)
2321 {
2322 	int r;
2323 
2324 	mutex_lock(&kvm->slots_lock);
2325 
2326 	r = kvm_get_dirty_log_protect(kvm, log);
2327 
2328 	mutex_unlock(&kvm->slots_lock);
2329 	return r;
2330 }
2331 
2332 /**
2333  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2334  *	and reenable dirty page tracking for the corresponding pages.
2335  * @kvm:	pointer to kvm instance
2336  * @log:	slot id and address from which to fetch the bitmap of dirty pages
2337  */
2338 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2339 				       struct kvm_clear_dirty_log *log)
2340 {
2341 	struct kvm_memslots *slots;
2342 	struct kvm_memory_slot *memslot;
2343 	int as_id, id;
2344 	gfn_t offset;
2345 	unsigned long i, n;
2346 	unsigned long *dirty_bitmap;
2347 	unsigned long *dirty_bitmap_buffer;
2348 	bool flush;
2349 
2350 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2351 	if (!kvm_use_dirty_bitmap(kvm))
2352 		return -ENXIO;
2353 
2354 	as_id = log->slot >> 16;
2355 	id = (u16)log->slot;
2356 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2357 		return -EINVAL;
2358 
2359 	if (log->first_page & 63)
2360 		return -EINVAL;
2361 
2362 	slots = __kvm_memslots(kvm, as_id);
2363 	memslot = id_to_memslot(slots, id);
2364 	if (!memslot || !memslot->dirty_bitmap)
2365 		return -ENOENT;
2366 
2367 	dirty_bitmap = memslot->dirty_bitmap;
2368 
2369 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2370 
2371 	if (log->first_page > memslot->npages ||
2372 	    log->num_pages > memslot->npages - log->first_page ||
2373 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2374 	    return -EINVAL;
2375 
2376 	kvm_arch_sync_dirty_log(kvm, memslot);
2377 
2378 	flush = false;
2379 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2380 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2381 		return -EFAULT;
2382 
2383 	KVM_MMU_LOCK(kvm);
2384 	for (offset = log->first_page, i = offset / BITS_PER_LONG,
2385 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2386 	     i++, offset += BITS_PER_LONG) {
2387 		unsigned long mask = *dirty_bitmap_buffer++;
2388 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2389 		if (!mask)
2390 			continue;
2391 
2392 		mask &= atomic_long_fetch_andnot(mask, p);
2393 
2394 		/*
2395 		 * mask contains the bits that really have been cleared.  This
2396 		 * never includes any bits beyond the length of the memslot (if
2397 		 * the length is not aligned to 64 pages), therefore it is not
2398 		 * a problem if userspace sets them in log->dirty_bitmap.
2399 		*/
2400 		if (mask) {
2401 			flush = true;
2402 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2403 								offset, mask);
2404 		}
2405 	}
2406 	KVM_MMU_UNLOCK(kvm);
2407 
2408 	if (flush)
2409 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2410 
2411 	return 0;
2412 }
2413 
2414 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2415 					struct kvm_clear_dirty_log *log)
2416 {
2417 	int r;
2418 
2419 	mutex_lock(&kvm->slots_lock);
2420 
2421 	r = kvm_clear_dirty_log_protect(kvm, log);
2422 
2423 	mutex_unlock(&kvm->slots_lock);
2424 	return r;
2425 }
2426 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2427 
2428 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
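/*
 * Memory attributes that may be set on this VM; PRIVATE is reported if the
 * VM supports private memory, or unconditionally when @kvm is NULL.
 */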
2429 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2430 {
2431 	if (!kvm || kvm_arch_has_private_mem(kvm))
2432 		return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2433 
2434 	return 0;
2435 }
2436 
2437 /*
2438  * Returns true if _all_ gfns in the range [@start, @end) have attributes
2439  * such that the bits in @mask match @attrs.
2440  */
2441 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2442 				     unsigned long mask, unsigned long attrs)
2443 {
2444 	XA_STATE(xas, &kvm->mem_attr_array, start);
2445 	unsigned long index;
2446 	void *entry;
2447 
2448 	mask &= kvm_supported_mem_attributes(kvm);
2449 	if (attrs & ~mask)
2450 		return false;
2451 
2452 	if (end == start + 1)
2453 		return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
2454 
2455 	guard(rcu)();
2456 	if (!attrs)
2457 		return !xas_find(&xas, end - 1);
2458 
2459 	for (index = start; index < end; index++) {
2460 		do {
2461 			entry = xas_next(&xas);
2462 		} while (xas_retry(&xas, entry));
2463 
2464 		if (xas.xa_index != index ||
2465 		    (xa_to_value(entry) & mask) != attrs)
2466 			return false;
2467 	}
2468 
2469 	return true;
2470 }
2471 
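/*
 * Invoke range->handler() on every memslot that intersects the gfn range, in
 * all address spaces.  mmu_lock is taken (and range->on_lock() invoked) only
 * once the first intersecting memslot is found, and remote TLBs are flushed
 * if flush_on_ret is set and any handler returned true.
 */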
2472 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2473 						 struct kvm_mmu_notifier_range *range)
2474 {
2475 	struct kvm_gfn_range gfn_range;
2476 	struct kvm_memory_slot *slot;
2477 	struct kvm_memslots *slots;
2478 	struct kvm_memslot_iter iter;
2479 	bool found_memslot = false;
2480 	bool ret = false;
2481 	int i;
2482 
2483 	gfn_range.arg = range->arg;
2484 	gfn_range.may_block = range->may_block;
2485 
2486 	/*
2487 	 * If/when KVM supports more attributes beyond private vs. shared, this
2488 	 * _could_ set KVM_FILTER_{SHARED,PRIVATE} appropriately if the entire target
2489 	 * range already has the desired private vs. shared state (it's unclear
2490 	 * if that is a net win).  For now, KVM reaches this point if and only
2491 	 * if the private flag is being toggled, i.e. all mappings are in play.
2492 	 */
2493 
2494 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2495 		slots = __kvm_memslots(kvm, i);
2496 
2497 		kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2498 			slot = iter.slot;
2499 			gfn_range.slot = slot;
2500 
2501 			gfn_range.start = max(range->start, slot->base_gfn);
2502 			gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2503 			if (gfn_range.start >= gfn_range.end)
2504 				continue;
2505 
2506 			if (!found_memslot) {
2507 				found_memslot = true;
2508 				KVM_MMU_LOCK(kvm);
2509 				if (!IS_KVM_NULL_FN(range->on_lock))
2510 					range->on_lock(kvm);
2511 			}
2512 
2513 			ret |= range->handler(kvm, &gfn_range);
2514 		}
2515 	}
2516 
2517 	if (range->flush_on_ret && ret)
2518 		kvm_flush_remote_tlbs(kvm);
2519 
2520 	if (found_memslot)
2521 		KVM_MMU_UNLOCK(kvm);
2522 }
2523 
2524 static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2525 					  struct kvm_gfn_range *range)
2526 {
2527 	/*
2528 	 * Unconditionally add the range to the invalidation set, regardless of
2529 	 * whether or not the arch callback actually needs to zap SPTEs.  E.g.
2530 	 * if KVM supports RWX attributes in the future and the attributes are
2531 	 * going from R=>RW, zapping isn't strictly necessary.  Unconditionally
2532 	 * adding the range allows KVM to require that MMU invalidations add at
2533 	 * least one range between begin() and end(), e.g. allows KVM to detect
2534 	 * bugs where the add() is missed.  Relaxing the rule *might* be safe,
2535 	 * but it's not obvious that allowing new mappings while the attributes
2536 	 * are in flux is desirable or worth the complexity.
2537 	 */
2538 	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2539 
2540 	return kvm_arch_pre_set_memory_attributes(kvm, range);
2541 }
2542 
2543 /* Set @attributes for the gfn range [@start, @end). */
2544 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2545 				     unsigned long attributes)
2546 {
2547 	struct kvm_mmu_notifier_range pre_set_range = {
2548 		.start = start,
2549 		.end = end,
2550 		.arg.attributes = attributes,
2551 		.handler = kvm_pre_set_memory_attributes,
2552 		.on_lock = kvm_mmu_invalidate_begin,
2553 		.flush_on_ret = true,
2554 		.may_block = true,
2555 	};
2556 	struct kvm_mmu_notifier_range post_set_range = {
2557 		.start = start,
2558 		.end = end,
2559 		.arg.attributes = attributes,
2560 		.handler = kvm_arch_post_set_memory_attributes,
2561 		.on_lock = kvm_mmu_invalidate_end,
2562 		.may_block = true,
2563 	};
2564 	unsigned long i;
2565 	void *entry;
2566 	int r = 0;
2567 
2568 	entry = attributes ? xa_mk_value(attributes) : NULL;
2569 
2570 	trace_kvm_vm_set_mem_attributes(start, end, attributes);
2571 
2572 	mutex_lock(&kvm->slots_lock);
2573 
2574 	/* Nothing to do if the entire range has the desired attributes. */
2575 	if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
2576 		goto out_unlock;
2577 
2578 	/*
2579 	 * Reserve memory ahead of time to avoid having to deal with failures
2580 	 * partway through setting the new attributes.
2581 	 */
2582 	for (i = start; i < end; i++) {
2583 		r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2584 		if (r)
2585 			goto out_unlock;
2586 
2587 		cond_resched();
2588 	}
2589 
2590 	kvm_handle_gfn_range(kvm, &pre_set_range);
2591 
2592 	for (i = start; i < end; i++) {
2593 		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2594 				    GFP_KERNEL_ACCOUNT));
2595 		KVM_BUG_ON(r, kvm);
2596 		cond_resched();
2597 	}
2598 
2599 	kvm_handle_gfn_range(kvm, &post_set_range);
2600 
2601 out_unlock:
2602 	mutex_unlock(&kvm->slots_lock);
2603 
2604 	return r;
2605 }
2606 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2607 					   struct kvm_memory_attributes *attrs)
2608 {
2609 	gfn_t start, end;
2610 
2611 	/* flags is currently not used. */
2612 	if (attrs->flags)
2613 		return -EINVAL;
2614 	if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2615 		return -EINVAL;
2616 	if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2617 		return -EINVAL;
2618 	if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2619 		return -EINVAL;
2620 
2621 	start = attrs->address >> PAGE_SHIFT;
2622 	end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2623 
2624 	/*
2625 	 * xarray tracks data using "unsigned long", and as a result so does
2626 	 * KVM.  For simplicity, supports generic attributes only on 64-bit
2627 	 * architectures.
2628 	 */
2629 	BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2630 
2631 	return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2632 }
2633 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2634 
2635 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2636 {
2637 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2638 }
2639 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_memslot);
2640 
2641 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2642 {
2643 	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2644 	u64 gen = slots->generation;
2645 	struct kvm_memory_slot *slot;
2646 
2647 	/*
2648 	 * This also protects against using a memslot from a different address space,
2649 	 * since different address spaces have different generation numbers.
2650 	 */
2651 	if (unlikely(gen != vcpu->last_used_slot_gen)) {
2652 		vcpu->last_used_slot = NULL;
2653 		vcpu->last_used_slot_gen = gen;
2654 	}
2655 
2656 	slot = try_get_memslot(vcpu->last_used_slot, gfn);
2657 	if (slot)
2658 		return slot;
2659 
2660 	/*
2661 	 * Fall back to searching all memslots. We purposely use
2662 	 * search_memslots() instead of __gfn_to_memslot() to avoid
2663 	 * thrashing the VM-wide last_used_slot in kvm_memslots.
2664 	 */
2665 	slot = search_memslots(slots, gfn, false);
2666 	if (slot) {
2667 		vcpu->last_used_slot = slot;
2668 		return slot;
2669 	}
2670 
2671 	return NULL;
2672 }
2673 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_memslot);
2674 
2675 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2676 {
2677 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2678 
2679 	return kvm_is_visible_memslot(memslot);
2680 }
2681 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_visible_gfn);
2682 
2683 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2684 {
2685 	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2686 
2687 	return kvm_is_visible_memslot(memslot);
2688 }
2689 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_visible_gfn);
2690 
2691 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2692 {
2693 	struct vm_area_struct *vma;
2694 	unsigned long addr, size;
2695 
2696 	size = PAGE_SIZE;
2697 
2698 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2699 	if (kvm_is_error_hva(addr))
2700 		return PAGE_SIZE;
2701 
2702 	mmap_read_lock(current->mm);
2703 	vma = find_vma(current->mm, addr);
2704 	if (!vma)
2705 		goto out;
2706 
2707 	size = vma_kernel_pagesize(vma);
2708 
2709 out:
2710 	mmap_read_unlock(current->mm);
2711 
2712 	return size;
2713 }
2714 
2715 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2716 {
2717 	return slot->flags & KVM_MEM_READONLY;
2718 }
2719 
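/*
 * Translate @gfn in @slot to a host virtual address.  Fails for INVALID
 * slots and for writes to read-only slots; optionally returns the number of
 * pages remaining in the slot starting at @gfn via @nr_pages.
 */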
2720 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2721 				       gfn_t *nr_pages, bool write)
2722 {
2723 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2724 		return KVM_HVA_ERR_BAD;
2725 
2726 	if (memslot_is_readonly(slot) && write)
2727 		return KVM_HVA_ERR_RO_BAD;
2728 
2729 	if (nr_pages)
2730 		*nr_pages = slot->npages - (gfn - slot->base_gfn);
2731 
2732 	return __gfn_to_hva_memslot(slot, gfn);
2733 }
2734 
2735 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2736 				     gfn_t *nr_pages)
2737 {
2738 	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2739 }
2740 
2741 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2742 					gfn_t gfn)
2743 {
2744 	return gfn_to_hva_many(slot, gfn, NULL);
2745 }
2746 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva_memslot);
2747 
2748 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2749 {
2750 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2751 }
2752 EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva);
2753 
2754 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2755 {
2756 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2757 }
2758 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_hva);
2759 
2760 /*
2761  * Return the hva of a @gfn and the R/W attribute if possible.
2762  *
2763  * @slot: the kvm_memory_slot which contains @gfn
2764  * @gfn: the gfn to be translated
2765  * @writable: used to return the read/write attribute of the @slot if the hva
2766  * is valid and @writable is not NULL
2767  */
2768 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2769 				      gfn_t gfn, bool *writable)
2770 {
2771 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2772 
2773 	if (!kvm_is_error_hva(hva) && writable)
2774 		*writable = !memslot_is_readonly(slot);
2775 
2776 	return hva;
2777 }
2778 
2779 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2780 {
2781 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2782 
2783 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2784 }
2785 
2786 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2787 {
2788 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2789 
2790 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2791 }
2792 
2793 static bool kvm_is_ad_tracked_page(struct page *page)
2794 {
2795 	/*
2796 	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
2797 	 * touched (e.g. set dirty) except by its owner".
2798 	 */
2799 	return !PageReserved(page);
2800 }
2801 
2802 static void kvm_set_page_dirty(struct page *page)
2803 {
2804 	if (kvm_is_ad_tracked_page(page))
2805 		SetPageDirty(page);
2806 }
2807 
2808 static void kvm_set_page_accessed(struct page *page)
2809 {
2810 	if (kvm_is_ad_tracked_page(page))
2811 		mark_page_accessed(page);
2812 }
2813 
2814 void kvm_release_page_clean(struct page *page)
2815 {
2816 	if (!page)
2817 		return;
2818 
2819 	kvm_set_page_accessed(page);
2820 	put_page(page);
2821 }
2822 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_clean);
2823 
2824 void kvm_release_page_dirty(struct page *page)
2825 {
2826 	if (!page)
2827 		return;
2828 
2829 	kvm_set_page_dirty(page);
2830 	kvm_release_page_clean(page);
2831 }
2832 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_dirty);
2833 
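/*
 * Convert the result of a gup/pfnmap lookup into a pfn; exactly one of @page
 * (a refcounted page) or @map (a pfnmap lookup) must be provided.  Records
 * whether the mapping is writable (if the caller asked via map_writable) and
 * hands the refcounted page (NULL for pfnmap memory) back via @kfp.
 */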
2834 static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
2835 				 struct follow_pfnmap_args *map, bool writable)
2836 {
2837 	kvm_pfn_t pfn;
2838 
2839 	WARN_ON_ONCE(!!page == !!map);
2840 
2841 	if (kfp->map_writable)
2842 		*kfp->map_writable = writable;
2843 
2844 	if (map)
2845 		pfn = map->pfn;
2846 	else
2847 		pfn = page_to_pfn(page);
2848 
2849 	*kfp->refcounted_page = page;
2850 
2851 	return pfn;
2852 }
2853 
2854 /*
2855  * The fast path for getting a writable pfn, which will be stored in @pfn.
2856  * Returns true on success, otherwise false.
2857  */
2858 static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2859 {
2860 	struct page *page;
2861 	bool r;
2862 
2863 	/*
2864 	 * Try the fast-only path when the caller wants to pin/get the page for
2865 	 * writing.  If the caller only wants to read the page, KVM must go
2866 	 * down the full, slow path in order to avoid racing an operation that
2867 	 * breaks Copy-on-Write (CoW), e.g. so that KVM doesn't end up pointing
2868 	 * at the old, read-only page while mm/ points at a new, writable page.
2869 	 */
2870 	if (!((kfp->flags & FOLL_WRITE) || kfp->map_writable))
2871 		return false;
2872 
2873 	if (kfp->pin)
2874 		r = pin_user_pages_fast(kfp->hva, 1, FOLL_WRITE, &page) == 1;
2875 	else
2876 		r = get_user_page_fast_only(kfp->hva, FOLL_WRITE, &page);
2877 
2878 	if (r) {
2879 		*pfn = kvm_resolve_pfn(kfp, page, NULL, true);
2880 		return true;
2881 	}
2882 
2883 	return false;
2884 }
2885 
2886 /*
2887  * The slow path for getting the pfn of the specified host virtual address.
2888  * Returns 1 on success, or -errno if an error is detected.
2889  */
2890 static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
2891 {
2892 	/*
2893 	 * When a VCPU accesses a page that is not mapped into the secondary
2894 	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2895 	 * make progress. We always want to honor NUMA hinting faults in that
2896 	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2897 	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2898 	 * mapped into the secondary MMU and gets accessed by a VCPU.
2899 	 *
2900 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2901 	 * implicitly honor NUMA hinting faults and don't need this flag.
2902 	 */
2903 	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT | kfp->flags;
2904 	struct page *page, *wpage;
2905 	int npages;
2906 
2907 	if (kfp->pin)
2908 		npages = pin_user_pages_unlocked(kfp->hva, 1, &page, flags);
2909 	else
2910 		npages = get_user_pages_unlocked(kfp->hva, 1, &page, flags);
2911 	if (npages != 1)
2912 		return npages;
2913 
2914 	/*
2915 	 * Pinning is mutually exclusive with opportunistically mapping a read
2916 	 * fault as writable, as KVM should never pin pages when mapping memory
2917 	 * into the guest (pinning is only for direct accesses from KVM).
2918 	 */
2919 	if (WARN_ON_ONCE(kfp->map_writable && kfp->pin))
2920 		goto out;
2921 
2922 	/* map read fault as writable if possible */
2923 	if (!(flags & FOLL_WRITE) && kfp->map_writable &&
2924 	    get_user_page_fast_only(kfp->hva, FOLL_WRITE, &wpage)) {
2925 		put_page(page);
2926 		page = wpage;
2927 		flags |= FOLL_WRITE;
2928 	}
2929 
2930 out:
2931 	*pfn = kvm_resolve_pfn(kfp, page, NULL, flags & FOLL_WRITE);
2932 	return npages;
2933 }
2934 
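/* Check that the vma permits the access: VM_READ always, plus VM_WRITE for write faults. */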
2935 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2936 {
2937 	if (unlikely(!(vma->vm_flags & VM_READ)))
2938 		return false;
2939 
2940 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2941 		return false;
2942 
2943 	return true;
2944 }
2945 
2946 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2947 			       struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)
2948 {
2949 	struct follow_pfnmap_args args = { .vma = vma, .address = kfp->hva };
2950 	bool write_fault = kfp->flags & FOLL_WRITE;
2951 	int r;
2952 
2953 	/*
2954 	 * Remapped memory cannot be pinned in any meaningful sense.  Bail if
2955 	 * the caller wants to pin the page, i.e. access the page outside of
2956 	 * MMU notifier protection, and unsafe mappings are disallowed.
2957 	 */
2958 	if (kfp->pin && !allow_unsafe_mappings)
2959 		return -EINVAL;
2960 
2961 	r = follow_pfnmap_start(&args);
2962 	if (r) {
2963 		/*
2964 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2965 		 * not call the fault handler, so do it here.
2966 		 */
2967 		bool unlocked = false;
2968 		r = fixup_user_fault(current->mm, kfp->hva,
2969 				     (write_fault ? FAULT_FLAG_WRITE : 0),
2970 				     &unlocked);
2971 		if (unlocked)
2972 			return -EAGAIN;
2973 		if (r)
2974 			return r;
2975 
2976 		r = follow_pfnmap_start(&args);
2977 		if (r)
2978 			return r;
2979 	}
2980 
2981 	if (write_fault && !args.writable) {
2982 		*p_pfn = KVM_PFN_ERR_RO_FAULT;
2983 		goto out;
2984 	}
2985 
2986 	*p_pfn = kvm_resolve_pfn(kfp, NULL, &args, args.writable);
2987 out:
2988 	follow_pfnmap_end(&args);
2989 	return r;
2990 }
2991 
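/*
 * Convert kfp->hva to a pfn: try the fast gup path first, then the slow path,
 * and finally fall back to walking the VMA to handle VM_IO/VM_PFNMAP memory.
 */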
2992 kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)
2993 {
2994 	struct vm_area_struct *vma;
2995 	kvm_pfn_t pfn;
2996 	int npages, r;
2997 
2998 	might_sleep();
2999 
3000 	if (WARN_ON_ONCE(!kfp->refcounted_page))
3001 		return KVM_PFN_ERR_FAULT;
3002 
3003 	if (hva_to_pfn_fast(kfp, &pfn))
3004 		return pfn;
3005 
3006 	npages = hva_to_pfn_slow(kfp, &pfn);
3007 	if (npages == 1)
3008 		return pfn;
3009 	if (npages == -EINTR || npages == -EAGAIN)
3010 		return KVM_PFN_ERR_SIGPENDING;
3011 	if (npages == -EHWPOISON)
3012 		return KVM_PFN_ERR_HWPOISON;
3013 
3014 	mmap_read_lock(current->mm);
3015 retry:
3016 	vma = vma_lookup(current->mm, kfp->hva);
3017 
3018 	if (vma == NULL)
3019 		pfn = KVM_PFN_ERR_FAULT;
3020 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
3021 		r = hva_to_pfn_remapped(vma, kfp, &pfn);
3022 		if (r == -EAGAIN)
3023 			goto retry;
3024 		if (r < 0)
3025 			pfn = KVM_PFN_ERR_FAULT;
3026 	} else {
3027 		if ((kfp->flags & FOLL_NOWAIT) &&
3028 		    vma_is_valid(vma, kfp->flags & FOLL_WRITE))
3029 			pfn = KVM_PFN_ERR_NEEDS_IO;
3030 		else
3031 			pfn = KVM_PFN_ERR_FAULT;
3032 	}
3033 	mmap_read_unlock(current->mm);
3034 	return pfn;
3035 }
3036 
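/*
 * Resolve kfp->gfn to a pfn: translate the gfn to an hva via its memslot,
 * honoring read-only and INVALID memslots, then convert the hva to a pfn.
 */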
3037 static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)
3038 {
3039 	kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL,
3040 				     kfp->flags & FOLL_WRITE);
3041 
3042 	if (kfp->hva == KVM_HVA_ERR_RO_BAD)
3043 		return KVM_PFN_ERR_RO_FAULT;
3044 
3045 	if (kvm_is_error_hva(kfp->hva))
3046 		return KVM_PFN_NOSLOT;
3047 
3048 	if (memslot_is_readonly(kfp->slot) && kfp->map_writable) {
3049 		*kfp->map_writable = false;
3050 		kfp->map_writable = NULL;
3051 	}
3052 
3053 	return hva_to_pfn(kfp);
3054 }
3055 
3056 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
3057 			    unsigned int foll, bool *writable,
3058 			    struct page **refcounted_page)
3059 {
3060 	struct kvm_follow_pfn kfp = {
3061 		.slot = slot,
3062 		.gfn = gfn,
3063 		.flags = foll,
3064 		.map_writable = writable,
3065 		.refcounted_page = refcounted_page,
3066 	};
3067 
3068 	if (WARN_ON_ONCE(!writable || !refcounted_page))
3069 		return KVM_PFN_ERR_FAULT;
3070 
3071 	*writable = false;
3072 	*refcounted_page = NULL;
3073 
3074 	return kvm_follow_pfn(&kfp);
3075 }
3076 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_faultin_pfn);
3077 
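/*
 * Opportunistically grab up to @nr_pages consecutive pages starting at @gfn
 * using a fast-only gup.  Returns the number of pages obtained, 0 if the
 * memslot doesn't cover @nr_pages from @gfn, or -1 if @gfn has no valid hva.
 */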
3078 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
3079 		       struct page **pages, int nr_pages)
3080 {
3081 	unsigned long addr;
3082 	gfn_t entry = 0;
3083 
3084 	addr = gfn_to_hva_many(slot, gfn, &entry);
3085 	if (kvm_is_error_hva(addr))
3086 		return -1;
3087 
3088 	if (entry < nr_pages)
3089 		return 0;
3090 
3091 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3092 }
3093 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prefetch_pages);
3094 
3095 /*
3096  * Don't use this API unless you are absolutely, positively certain that KVM
3097  * needs to get a struct page, e.g. to pin the page for firmware DMA.
3098  *
3099  * FIXME: Users of this API likely need to FOLL_PIN the page, not just elevate
3100  *	  its refcount.
3101  */
3102 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write)
3103 {
3104 	struct page *refcounted_page = NULL;
3105 	struct kvm_follow_pfn kfp = {
3106 		.slot = gfn_to_memslot(kvm, gfn),
3107 		.gfn = gfn,
3108 		.flags = write ? FOLL_WRITE : 0,
3109 		.refcounted_page = &refcounted_page,
3110 	};
3111 
3112 	(void)kvm_follow_pfn(&kfp);
3113 	return refcounted_page;
3114 }
3115 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__gfn_to_page);
3116 
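/*
 * Pin the page backing @gfn and map it into kernel address space: kmap() for
 * struct page memory, memremap() for PFNMAP memory when CONFIG_HAS_IOMEM.
 */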
3117 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
3118 		   bool writable)
3119 {
3120 	struct kvm_follow_pfn kfp = {
3121 		.slot = gfn_to_memslot(vcpu->kvm, gfn),
3122 		.gfn = gfn,
3123 		.flags = writable ? FOLL_WRITE : 0,
3124 		.refcounted_page = &map->pinned_page,
3125 		.pin = true,
3126 	};
3127 
3128 	map->pinned_page = NULL;
3129 	map->page = NULL;
3130 	map->hva = NULL;
3131 	map->gfn = gfn;
3132 	map->writable = writable;
3133 
3134 	map->pfn = kvm_follow_pfn(&kfp);
3135 	if (is_error_noslot_pfn(map->pfn))
3136 		return -EINVAL;
3137 
3138 	if (pfn_valid(map->pfn)) {
3139 		map->page = pfn_to_page(map->pfn);
3140 		map->hva = kmap(map->page);
3141 #ifdef CONFIG_HAS_IOMEM
3142 	} else {
3143 		map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
3144 #endif
3145 	}
3146 
3147 	return map->hva ? 0 : -EFAULT;
3148 }
3149 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_map);
3150 
3151 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
3152 {
3153 	if (!map->hva)
3154 		return;
3155 
3156 	if (map->page)
3157 		kunmap(map->page);
3158 #ifdef CONFIG_HAS_IOMEM
3159 	else
3160 		memunmap(map->hva);
3161 #endif
3162 
3163 	if (map->writable)
3164 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3165 
3166 	if (map->pinned_page) {
3167 		if (map->writable)
3168 			kvm_set_page_dirty(map->pinned_page);
3169 		kvm_set_page_accessed(map->pinned_page);
3170 		unpin_user_page(map->pinned_page);
3171 	}
3172 
3173 	map->hva = NULL;
3174 	map->page = NULL;
3175 	map->pinned_page = NULL;
3176 }
3177 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_unmap);
3178 
3179 static int next_segment(unsigned long len, int offset)
3180 {
3181 	if (len > PAGE_SIZE - offset)
3182 		return PAGE_SIZE - offset;
3183 	else
3184 		return len;
3185 }
3186 
3187 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3188 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3189 				 void *data, int offset, int len)
3190 {
3191 	int r;
3192 	unsigned long addr;
3193 
3194 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3195 		return -EFAULT;
3196 
3197 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3198 	if (kvm_is_error_hva(addr))
3199 		return -EFAULT;
3200 	r = __copy_from_user(data, (void __user *)addr + offset, len);
3201 	if (r)
3202 		return -EFAULT;
3203 	return 0;
3204 }
3205 
3206 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3207 			int len)
3208 {
3209 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3210 
3211 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3212 }
3213 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_page);
3214 
3215 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3216 			     int offset, int len)
3217 {
3218 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3219 
3220 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3221 }
3222 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_page);
3223 
3224 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3225 {
3226 	gfn_t gfn = gpa >> PAGE_SHIFT;
3227 	int seg;
3228 	int offset = offset_in_page(gpa);
3229 	int ret;
3230 
3231 	while ((seg = next_segment(len, offset)) != 0) {
3232 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3233 		if (ret < 0)
3234 			return ret;
3235 		offset = 0;
3236 		len -= seg;
3237 		data += seg;
3238 		++gfn;
3239 	}
3240 	return 0;
3241 }
3242 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest);
3243 
3244 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3245 {
3246 	gfn_t gfn = gpa >> PAGE_SHIFT;
3247 	int seg;
3248 	int offset = offset_in_page(gpa);
3249 	int ret;
3250 
3251 	while ((seg = next_segment(len, offset)) != 0) {
3252 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3253 		if (ret < 0)
3254 			return ret;
3255 		offset = 0;
3256 		len -= seg;
3257 		data += seg;
3258 		++gfn;
3259 	}
3260 	return 0;
3261 }
3262 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest);
3263 
3264 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3265 			           void *data, int offset, unsigned long len)
3266 {
3267 	int r;
3268 	unsigned long addr;
3269 
3270 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3271 		return -EFAULT;
3272 
3273 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3274 	if (kvm_is_error_hva(addr))
3275 		return -EFAULT;
3276 	pagefault_disable();
3277 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3278 	pagefault_enable();
3279 	if (r)
3280 		return -EFAULT;
3281 	return 0;
3282 }
3283 
3284 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3285 			       void *data, unsigned long len)
3286 {
3287 	gfn_t gfn = gpa >> PAGE_SHIFT;
3288 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3289 	int offset = offset_in_page(gpa);
3290 
3291 	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3292 }
3293 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_atomic);
3294 
3295 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3296 static int __kvm_write_guest_page(struct kvm *kvm,
3297 				  struct kvm_memory_slot *memslot, gfn_t gfn,
3298 			          const void *data, int offset, int len)
3299 {
3300 	int r;
3301 	unsigned long addr;
3302 
3303 	if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
3304 		return -EFAULT;
3305 
3306 	addr = gfn_to_hva_memslot(memslot, gfn);
3307 	if (kvm_is_error_hva(addr))
3308 		return -EFAULT;
3309 	r = __copy_to_user((void __user *)addr + offset, data, len);
3310 	if (r)
3311 		return -EFAULT;
3312 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3313 	return 0;
3314 }
3315 
3316 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3317 			 const void *data, int offset, int len)
3318 {
3319 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3320 
3321 	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3322 }
3323 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_page);
3324 
3325 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3326 			      const void *data, int offset, int len)
3327 {
3328 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3329 
3330 	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3331 }
3332 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest_page);
3333 
3334 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3335 		    unsigned long len)
3336 {
3337 	gfn_t gfn = gpa >> PAGE_SHIFT;
3338 	int seg;
3339 	int offset = offset_in_page(gpa);
3340 	int ret;
3341 
3342 	while ((seg = next_segment(len, offset)) != 0) {
3343 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3344 		if (ret < 0)
3345 			return ret;
3346 		offset = 0;
3347 		len -= seg;
3348 		data += seg;
3349 		++gfn;
3350 	}
3351 	return 0;
3352 }
3353 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest);
3354 
3355 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3356 		         unsigned long len)
3357 {
3358 	gfn_t gfn = gpa >> PAGE_SHIFT;
3359 	int seg;
3360 	int offset = offset_in_page(gpa);
3361 	int ret;
3362 
3363 	while ((seg = next_segment(len, offset)) != 0) {
3364 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3365 		if (ret < 0)
3366 			return ret;
3367 		offset = 0;
3368 		len -= seg;
3369 		data += seg;
3370 		++gfn;
3371 	}
3372 	return 0;
3373 }
3374 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest);
3375 
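/*
 * Initialize @ghc so that subsequent cached accesses can reuse the gfn->hva
 * translation for @gpa/@len.  A range that crosses a page boundary is still
 * validated, but ghc->memslot is left NULL so that cached accesses fall back
 * to the slow (uncached) path.
 */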
3376 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3377 				       struct gfn_to_hva_cache *ghc,
3378 				       gpa_t gpa, unsigned long len)
3379 {
3380 	int offset = offset_in_page(gpa);
3381 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
3382 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3383 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3384 	gfn_t nr_pages_avail;
3385 
3386 	/* Update ghc->generation before performing any error checks. */
3387 	ghc->generation = slots->generation;
3388 
3389 	if (start_gfn > end_gfn) {
3390 		ghc->hva = KVM_HVA_ERR_BAD;
3391 		return -EINVAL;
3392 	}
3393 
3394 	/*
3395 	 * If the requested region crosses two memslots, we still
3396 	 * verify that the entire region is valid here.
3397 	 */
3398 	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3399 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3400 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3401 					   &nr_pages_avail);
3402 		if (kvm_is_error_hva(ghc->hva))
3403 			return -EFAULT;
3404 	}
3405 
3406 	/* Use the slow path for cross page reads and writes. */
3407 	if (nr_pages_needed == 1)
3408 		ghc->hva += offset;
3409 	else
3410 		ghc->memslot = NULL;
3411 
3412 	ghc->gpa = gpa;
3413 	ghc->len = len;
3414 	return 0;
3415 }
3416 
3417 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3418 			      gpa_t gpa, unsigned long len)
3419 {
3420 	struct kvm_memslots *slots = kvm_memslots(kvm);
3421 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3422 }
3423 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gfn_to_hva_cache_init);
3424 
3425 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3426 				  void *data, unsigned int offset,
3427 				  unsigned long len)
3428 {
3429 	struct kvm_memslots *slots = kvm_memslots(kvm);
3430 	int r;
3431 	gpa_t gpa = ghc->gpa + offset;
3432 
3433 	if (WARN_ON_ONCE(len + offset > ghc->len))
3434 		return -EINVAL;
3435 
3436 	if (slots->generation != ghc->generation) {
3437 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3438 			return -EFAULT;
3439 	}
3440 
3441 	if (kvm_is_error_hva(ghc->hva))
3442 		return -EFAULT;
3443 
3444 	if (unlikely(!ghc->memslot))
3445 		return kvm_write_guest(kvm, gpa, data, len);
3446 
3447 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3448 	if (r)
3449 		return -EFAULT;
3450 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3451 
3452 	return 0;
3453 }
3454 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_offset_cached);
3455 
3456 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3457 			   void *data, unsigned long len)
3458 {
3459 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3460 }
3461 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_cached);
3462 
3463 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3464 				 void *data, unsigned int offset,
3465 				 unsigned long len)
3466 {
3467 	struct kvm_memslots *slots = kvm_memslots(kvm);
3468 	int r;
3469 	gpa_t gpa = ghc->gpa + offset;
3470 
3471 	if (WARN_ON_ONCE(len + offset > ghc->len))
3472 		return -EINVAL;
3473 
3474 	if (slots->generation != ghc->generation) {
3475 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3476 			return -EFAULT;
3477 	}
3478 
3479 	if (kvm_is_error_hva(ghc->hva))
3480 		return -EFAULT;
3481 
3482 	if (unlikely(!ghc->memslot))
3483 		return kvm_read_guest(kvm, gpa, data, len);
3484 
3485 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3486 	if (r)
3487 		return -EFAULT;
3488 
3489 	return 0;
3490 }
3491 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_offset_cached);
3492 
3493 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3494 			  void *data, unsigned long len)
3495 {
3496 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3497 }
3498 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_cached);
3499 
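/* Zero @len bytes of guest memory at @gpa by writing ZERO_PAGE, page by page. */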
3500 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3501 {
3502 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3503 	gfn_t gfn = gpa >> PAGE_SHIFT;
3504 	int seg;
3505 	int offset = offset_in_page(gpa);
3506 	int ret;
3507 
3508 	while ((seg = next_segment(len, offset)) != 0) {
3509 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3510 		if (ret < 0)
3511 			return ret;
3512 		offset = 0;
3513 		len -= seg;
3514 		++gfn;
3515 	}
3516 	return 0;
3517 }
3518 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_clear_guest);
3519 
3520 void mark_page_dirty_in_slot(struct kvm *kvm,
3521 			     const struct kvm_memory_slot *memslot,
3522 		 	     gfn_t gfn)
3523 {
3524 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3525 
3526 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3527 	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3528 		return;
3529 
3530 	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3531 #endif
3532 
3533 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3534 		unsigned long rel_gfn = gfn - memslot->base_gfn;
3535 		u32 slot = (memslot->as_id << 16) | memslot->id;
3536 
3537 		if (kvm->dirty_ring_size && vcpu)
3538 			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3539 		else if (memslot->dirty_bitmap)
3540 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
3541 	}
3542 }
3543 EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty_in_slot);
3544 
3545 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3546 {
3547 	struct kvm_memory_slot *memslot;
3548 
3549 	memslot = gfn_to_memslot(kvm, gfn);
3550 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3551 }
3552 EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty);
3553 
3554 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3555 {
3556 	struct kvm_memory_slot *memslot;
3557 
3558 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3559 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3560 }
3561 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_mark_page_dirty);
3562 
3563 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3564 {
3565 	if (!vcpu->sigset_active)
3566 		return;
3567 
3568 	/*
3569 	 * This does a lockless modification of ->real_blocked, which is fine
3570 	 * because only current can change ->real_blocked, and all readers of
3571 	 * ->real_blocked don't care as long as ->real_blocked is always a subset
3572 	 * of ->blocked.
3573 	 */
3574 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3575 }
3576 
3577 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3578 {
3579 	if (!vcpu->sigset_active)
3580 		return;
3581 
3582 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3583 	sigemptyset(&current->real_blocked);
3584 }
3585 
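/*
 * Scale the vCPU's halt-polling window up by the halt_poll_ns_grow factor,
 * bumping it to at least halt_poll_ns_grow_start.
 */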
3586 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3587 {
3588 	unsigned int old, val, grow, grow_start;
3589 
3590 	old = val = vcpu->halt_poll_ns;
3591 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3592 	grow = READ_ONCE(halt_poll_ns_grow);
3593 	if (!grow)
3594 		goto out;
3595 
3596 	val *= grow;
3597 	if (val < grow_start)
3598 		val = grow_start;
3599 
3600 	vcpu->halt_poll_ns = val;
3601 out:
3602 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3603 }
3604 
3605 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3606 {
3607 	unsigned int old, val, shrink, grow_start;
3608 
3609 	old = val = vcpu->halt_poll_ns;
3610 	shrink = READ_ONCE(halt_poll_ns_shrink);
3611 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3612 	if (shrink == 0)
3613 		val = 0;
3614 	else
3615 		val /= shrink;
3616 
3617 	if (val < grow_start)
3618 		val = 0;
3619 
3620 	vcpu->halt_poll_ns = val;
3621 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3622 }
3623 
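/*
 * Return 0 if the vCPU should keep blocking, or -EINTR if it should wake up,
 * i.e. if the vCPU is runnable, has a pending timer or signal, or has a
 * pending KVM_REQ_UNBLOCK request.
 */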
3624 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3625 {
3626 	int ret = -EINTR;
3627 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3628 
3629 	if (kvm_arch_vcpu_runnable(vcpu))
3630 		goto out;
3631 	if (kvm_cpu_has_pending_timer(vcpu))
3632 		goto out;
3633 	if (signal_pending(current))
3634 		goto out;
3635 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3636 		goto out;
3637 
3638 	ret = 0;
3639 out:
3640 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3641 	return ret;
3642 }
3643 
3644 /*
3645  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3646  * pending.  This is mostly used when halting a vCPU, but may also be used
3647  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3648  */
3649 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3650 {
3651 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3652 	bool waited = false;
3653 
3654 	vcpu->stat.generic.blocking = 1;
3655 
3656 	preempt_disable();
3657 	kvm_arch_vcpu_blocking(vcpu);
3658 	prepare_to_rcuwait(wait);
3659 	preempt_enable();
3660 
3661 	for (;;) {
3662 		set_current_state(TASK_INTERRUPTIBLE);
3663 
3664 		if (kvm_vcpu_check_block(vcpu) < 0)
3665 			break;
3666 
3667 		waited = true;
3668 		schedule();
3669 	}
3670 
3671 	preempt_disable();
3672 	finish_rcuwait(wait);
3673 	kvm_arch_vcpu_unblocking(vcpu);
3674 	preempt_enable();
3675 
3676 	vcpu->stat.generic.blocking = 0;
3677 
3678 	return waited;
3679 }
3680 
3681 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3682 					  ktime_t end, bool success)
3683 {
3684 	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3685 	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3686 
3687 	++vcpu->stat.generic.halt_attempted_poll;
3688 
3689 	if (success) {
3690 		++vcpu->stat.generic.halt_successful_poll;
3691 
3692 		if (!vcpu_valid_wakeup(vcpu))
3693 			++vcpu->stat.generic.halt_poll_invalid;
3694 
3695 		stats->halt_poll_success_ns += poll_ns;
3696 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3697 	} else {
3698 		stats->halt_poll_fail_ns += poll_ns;
3699 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3700 	}
3701 }
3702 
3703 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3704 {
3705 	struct kvm *kvm = vcpu->kvm;
3706 
3707 	if (kvm->override_halt_poll_ns) {
3708 		/*
3709 		 * Ensure kvm->max_halt_poll_ns is not read before
3710 		 * kvm->override_halt_poll_ns.
3711 		 *
3712 		 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3713 		 */
3714 		smp_rmb();
3715 		return READ_ONCE(kvm->max_halt_poll_ns);
3716 	}
3717 
3718 	return READ_ONCE(halt_poll_ns);
3719 }
3720 
3721 /*
3722  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3723  * polling is enabled, busy wait for a short time before blocking to avoid the
3724  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3725  * is halted.
3726  */
3727 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3728 {
3729 	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3730 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3731 	ktime_t start, cur, poll_end;
3732 	bool waited = false;
3733 	bool do_halt_poll;
3734 	u64 halt_ns;
3735 
3736 	if (vcpu->halt_poll_ns > max_halt_poll_ns)
3737 		vcpu->halt_poll_ns = max_halt_poll_ns;
3738 
3739 	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3740 
3741 	start = cur = poll_end = ktime_get();
3742 	if (do_halt_poll) {
3743 		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3744 
3745 		do {
3746 			if (kvm_vcpu_check_block(vcpu) < 0)
3747 				goto out;
3748 			cpu_relax();
3749 			poll_end = cur = ktime_get();
3750 		} while (kvm_vcpu_can_poll(cur, stop));
3751 	}
3752 
3753 	waited = kvm_vcpu_block(vcpu);
3754 
3755 	cur = ktime_get();
3756 	if (waited) {
3757 		vcpu->stat.generic.halt_wait_ns +=
3758 			ktime_to_ns(cur) - ktime_to_ns(poll_end);
3759 		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3760 				ktime_to_ns(cur) - ktime_to_ns(poll_end));
3761 	}
3762 out:
3763 	/* The total time the vCPU was "halted", including polling time. */
3764 	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3765 
3766 	/*
3767 	 * Note, halt-polling is considered successful so long as the vCPU was
3768 	 * never actually scheduled out, i.e. even if the wake event arrived
3769 	 * after the end of the halt-polling loop itself, but before the full wait.
3770 	 */
3771 	if (do_halt_poll)
3772 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
3773 
3774 	if (halt_poll_allowed) {
3775 		/* Recompute the max halt poll time in case it changed. */
3776 		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3777 
3778 		if (!vcpu_valid_wakeup(vcpu)) {
3779 			shrink_halt_poll_ns(vcpu);
3780 		} else if (max_halt_poll_ns) {
3781 			if (halt_ns <= vcpu->halt_poll_ns)
3782 				;
3783 			/* we had a long block, shrink polling */
3784 			else if (vcpu->halt_poll_ns &&
3785 				 halt_ns > max_halt_poll_ns)
3786 				shrink_halt_poll_ns(vcpu);
3787 			/* we had a short halt and our poll time is too small */
3788 			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3789 				 halt_ns < max_halt_poll_ns)
3790 				grow_halt_poll_ns(vcpu);
3791 		} else {
3792 			vcpu->halt_poll_ns = 0;
3793 		}
3794 	}
3795 
3796 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3797 }
3798 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);
3799 
3800 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3801 {
3802 	if (__kvm_vcpu_wake_up(vcpu)) {
3803 		WRITE_ONCE(vcpu->ready, true);
3804 		++vcpu->stat.generic.halt_wakeup;
3805 		return true;
3806 	}
3807 
3808 	return false;
3809 }
3810 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_wake_up);
3811 
3812 #ifndef CONFIG_S390
3813 /*
3814  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3815  */
3816 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
3817 {
3818 	int me, cpu;
3819 
3820 	if (kvm_vcpu_wake_up(vcpu))
3821 		return;
3822 
3823 	me = get_cpu();
3824 	/*
3825 	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3826 	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3827 	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3828 	 * within the vCPU thread itself.
3829 	 */
3830 	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3831 		if (vcpu->mode == IN_GUEST_MODE)
3832 			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3833 		goto out;
3834 	}
3835 
3836 	/*
3837 	 * Note, the vCPU could get migrated to a different pCPU at any point
3838 	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3839 	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3840 	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3841 	 * vCPU also requires it to leave IN_GUEST_MODE.
3842 	 */
3843 	if (kvm_arch_vcpu_should_kick(vcpu)) {
3844 		cpu = READ_ONCE(vcpu->cpu);
3845 		if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu)) {
3846 			/*
3847 			 * Use a reschedule IPI to kick the vCPU if the caller
3848 			 * doesn't need to wait for a response, as KVM allows
3849 			 * kicking vCPUs while IRQs are disabled, but using the
3850 			 * SMP function call framework with IRQs disabled can
3851 			 * deadlock due to taking cross-CPU locks.
3852 			 */
3853 			if (wait)
3854 				smp_call_function_single(cpu, ack_kick, NULL, wait);
3855 			else
3856 				smp_send_reschedule(cpu);
3857 		}
3858 	}
3859 out:
3860 	put_cpu();
3861 }
3862 EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_kick);
3863 #endif /* !CONFIG_S390 */
3864 
3865 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3866 {
3867 	struct task_struct *task = NULL;
3868 	int ret;
3869 
3870 	if (!read_trylock(&target->pid_lock))
3871 		return 0;
3872 
3873 	if (target->pid)
3874 		task = get_pid_task(target->pid, PIDTYPE_PID);
3875 
3876 	read_unlock(&target->pid_lock);
3877 
3878 	if (!task)
3879 		return 0;
3880 	ret = yield_to(task, 1);
3881 	put_task_struct(task);
3882 
3883 	return ret;
3884 }
3885 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_yield_to);
3886 
3887 /*
3888  * Helper that checks whether a VCPU is eligible for directed yield.
3889  * The most eligible candidate to yield to is decided by the following heuristics:
3890  *
3891  *  (a) VCPU which has not done a pl-exit or cpu-relax intercept recently
3892  *  (preempted lock holder), indicated by @in_spin_loop.
3893  *  Set at the beginning and cleared at the end of interception/PLE handler.
3894  *
3895  *  (b) VCPU which has done a pl-exit/cpu-relax intercept but did not get a
3896  *  chance last time (it has most likely become eligible now since we probably
3897  *  yielded to the lock holder in the last iteration. This is done by toggling
3898  *  @dy_eligible each time a VCPU is checked for eligibility.)
3899  *
3900  *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3901  *  to preempted lock-holder could result in wrong VCPU selection and CPU
3902  *  burning. Giving priority for a potential lock-holder increases lock
3903  *  progress.
3904  *
3905  *  Since the algorithm is based on heuristics, accessing another VCPU's data
3906  *  without locking does no harm. It may result in trying to yield to the same
3907  *  VCPU, failing, and continuing with the next VCPU, and so on.
3908  */
3909 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3910 {
3911 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3912 	bool eligible;
3913 
3914 	eligible = !vcpu->spin_loop.in_spin_loop ||
3915 		    vcpu->spin_loop.dy_eligible;
3916 
3917 	if (vcpu->spin_loop.in_spin_loop)
3918 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3919 
3920 	return eligible;
3921 #else
3922 	return true;
3923 #endif
3924 }
3925 
3926 /*
3927  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3928  * a vcpu_load/vcpu_put pair.  However, for most architectures
3929  * kvm_arch_vcpu_runnable does not require vcpu_load.
3930  */
3931 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3932 {
3933 	return kvm_arch_vcpu_runnable(vcpu);
3934 }
3935 
3936 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3937 {
3938 	if (kvm_arch_dy_runnable(vcpu))
3939 		return true;
3940 
3941 #ifdef CONFIG_KVM_ASYNC_PF
3942 	if (!list_empty_careful(&vcpu->async_pf.done))
3943 		return true;
3944 #endif
3945 
3946 	return false;
3947 }
3948 
3949 /*
3950  * By default, simply query the target vCPU's current mode when checking if a
3951  * vCPU was preempted in kernel mode.  All architectures except x86 (or more
3952  * specifically, except VMX) allow querying whether or not a vCPU is in kernel
3953  * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
3954  * directly for cross-vCPU checks is functionally correct and accurate.
3955  */
3956 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
3957 {
3958 	return kvm_arch_vcpu_in_kernel(vcpu);
3959 }
3960 
3961 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3962 {
3963 	return false;
3964 }
3965 
3966 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3967 {
3968 	int nr_vcpus, start, i, idx, yielded;
3969 	struct kvm *kvm = me->kvm;
3970 	struct kvm_vcpu *vcpu;
3971 	int try = 3;
3972 
3973 	nr_vcpus = atomic_read(&kvm->online_vcpus);
3974 	if (nr_vcpus < 2)
3975 		return;
3976 
3977 	/* Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(). */
3978 	smp_rmb();
3979 
3980 	kvm_vcpu_set_in_spin_loop(me, true);
3981 
3982 	/*
3983 	 * The current vCPU ("me") is spinning in kernel mode, i.e. is likely
3984 	 * waiting for a resource to become available.  Attempt to yield to a
3985 	 * vCPU that is runnable, but not currently running, e.g. because the
3986 	 * vCPU was preempted by a higher priority task.  With luck, the vCPU
3987 	 * that was preempted is holding a lock or some other resource that the
3988 	 * current vCPU is waiting to acquire, and yielding to the other vCPU
3989 	 * will allow it to make forward progress and release the lock (or kick
3990 	 * the spinning vCPU, etc).
3991 	 *
3992 	 * Since KVM has no insight into what exactly the guest is doing,
3993 	 * approximate a round-robin selection by iterating over all vCPUs,
3994 	 * starting at the last boosted vCPU.  I.e. if N=kvm->last_boosted_vcpu,
3995 	 * iterate over vCPU[N+1]..vCPU[N-1], wrapping as needed.
3996 	 *
3997 	 * Note, this is inherently racy, e.g. if multiple vCPUs are spinning,
3998 	 * they may all try to yield to the same vCPU(s).  But as above, this
3999 	 * is all best effort due to KVM's lack of visibility into the guest.
4000 	 */
4001 	start = READ_ONCE(kvm->last_boosted_vcpu) + 1;
4002 	for (i = 0; i < nr_vcpus; i++) {
4003 		idx = (start + i) % nr_vcpus;
4004 		if (idx == me->vcpu_idx)
4005 			continue;
4006 
4007 		vcpu = xa_load(&kvm->vcpu_array, idx);
4008 		if (!READ_ONCE(vcpu->ready))
4009 			continue;
4010 		if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4011 			continue;
4012 
4013 		/*
4014 		 * Treat the target vCPU as being in-kernel if it has a pending
4015 		 * interrupt, as the vCPU trying to yield may be spinning
4016 		 * waiting on IPI delivery, i.e. the target vCPU is in-kernel
4017 		 * for the purposes of directed yield.
4018 		 */
4019 		if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4020 		    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4021 		    !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4022 			continue;
4023 
4024 		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4025 			continue;
4026 
4027 		yielded = kvm_vcpu_yield_to(vcpu);
4028 		if (yielded > 0) {
4029 			WRITE_ONCE(kvm->last_boosted_vcpu, i);
4030 			break;
4031 		} else if (yielded < 0 && !--try) {
4032 			break;
4033 		}
4034 	}
4035 	kvm_vcpu_set_in_spin_loop(me, false);
4036 
4037 	/* Ensure vcpu is not eligible during next spinloop */
4038 	kvm_vcpu_set_dy_eligible(me, false);
4039 }
4040 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_on_spin);
4041 
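/*
 * Return true if @pgoff falls within the vCPU mmap range reserved for the
 * dirty ring, i.e. within dirty_ring_size bytes starting at
 * KVM_DIRTY_LOG_PAGE_OFFSET.
 */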
4042 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4043 {
4044 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4045 	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4046 	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4047 	     kvm->dirty_ring_size / PAGE_SIZE);
4048 #else
4049 	return false;
4050 #endif
4051 }
4052 
4053 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4054 {
4055 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4056 	struct page *page;
4057 
4058 	if (vmf->pgoff == 0)
4059 		page = virt_to_page(vcpu->run);
4060 #ifdef CONFIG_X86
4061 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4062 		page = virt_to_page(vcpu->arch.pio_data);
4063 #endif
4064 #ifdef CONFIG_KVM_MMIO
4065 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4066 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4067 #endif
4068 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4069 		page = kvm_dirty_ring_get_page(
4070 		    &vcpu->dirty_ring,
4071 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4072 	else
4073 		return kvm_arch_vcpu_fault(vcpu, vmf);
4074 	get_page(page);
4075 	vmf->page = page;
4076 	return 0;
4077 }
4078 
4079 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4080 	.fault = kvm_vcpu_fault,
4081 };
4082 
4083 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4084 {
4085 	struct kvm_vcpu *vcpu = file->private_data;
4086 	unsigned long pages = vma_pages(vma);
4087 
4088 	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4089 	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4090 	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4091 		return -EINVAL;
4092 
4093 	vma->vm_ops = &kvm_vcpu_vm_ops;
4094 	return 0;
4095 }
4096 
4097 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4098 {
4099 	struct kvm_vcpu *vcpu = filp->private_data;
4100 
4101 	kvm_put_kvm(vcpu->kvm);
4102 	return 0;
4103 }
4104 
4105 static struct file_operations kvm_vcpu_fops = {
4106 	.release        = kvm_vcpu_release,
4107 	.unlocked_ioctl = kvm_vcpu_ioctl,
4108 	.mmap           = kvm_vcpu_mmap,
4109 	.llseek		= noop_llseek,
4110 	KVM_COMPAT(kvm_vcpu_compat_ioctl),
4111 };
4112 
4113 /*
4114  * Allocates an inode for the vcpu.
4115  */
4116 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4117 {
4118 	char name[8 + 1 + ITOA_MAX_LEN + 1];
4119 
4120 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4121 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4122 }
4123 
4124 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4125 static int vcpu_get_pid(void *data, u64 *val)
4126 {
4127 	struct kvm_vcpu *vcpu = data;
4128 
4129 	read_lock(&vcpu->pid_lock);
4130 	*val = pid_nr(vcpu->pid);
4131 	read_unlock(&vcpu->pid_lock);
4132 	return 0;
4133 }
4134 
4135 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4136 
4137 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4138 {
4139 	struct dentry *debugfs_dentry;
4140 	char dir_name[ITOA_MAX_LEN * 2];
4141 
4142 	if (!debugfs_initialized())
4143 		return;
4144 
4145 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4146 	debugfs_dentry = debugfs_create_dir(dir_name,
4147 					    vcpu->kvm->debugfs_dentry);
4148 	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4149 			    &vcpu_get_pid_fops);
4150 
4151 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4152 }
4153 #endif
4154 
4155 /*
4156  * Creates some virtual cpus.  Good luck creating more than one.
4157  */
4158 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
4159 {
4160 	int r;
4161 	struct kvm_vcpu *vcpu;
4162 	struct page *page;
4163 
4164 	/*
4165 	 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
4166 	 * too-large values instead of silently truncating.
4167 	 *
4168 	 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
4169 	 * changing the storage type (at the very least, IDs should be tracked
4170 	 * as unsigned ints).
4171 	 */
4172 	BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
4173 	if (id >= KVM_MAX_VCPU_IDS)
4174 		return -EINVAL;
4175 
4176 	mutex_lock(&kvm->lock);
4177 	if (kvm->created_vcpus >= kvm->max_vcpus) {
4178 		mutex_unlock(&kvm->lock);
4179 		return -EINVAL;
4180 	}
4181 
4182 	r = kvm_arch_vcpu_precreate(kvm, id);
4183 	if (r) {
4184 		mutex_unlock(&kvm->lock);
4185 		return r;
4186 	}
4187 
4188 	kvm->created_vcpus++;
4189 	mutex_unlock(&kvm->lock);
4190 
4191 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4192 	if (!vcpu) {
4193 		r = -ENOMEM;
4194 		goto vcpu_decrement;
4195 	}
4196 
4197 	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4198 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4199 	if (!page) {
4200 		r = -ENOMEM;
4201 		goto vcpu_free;
4202 	}
4203 	vcpu->run = page_address(page);
4204 
4205 	kvm_vcpu_init(vcpu, kvm, id);
4206 
4207 	r = kvm_arch_vcpu_create(vcpu);
4208 	if (r)
4209 		goto vcpu_free_run_page;
4210 
4211 	if (kvm->dirty_ring_size) {
4212 		r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring,
4213 					 id, kvm->dirty_ring_size);
4214 		if (r)
4215 			goto arch_vcpu_destroy;
4216 	}
4217 
4218 	mutex_lock(&kvm->lock);
4219 
4220 	if (kvm_get_vcpu_by_id(kvm, id)) {
4221 		r = -EEXIST;
4222 		goto unlock_vcpu_destroy;
4223 	}
4224 
4225 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4226 	r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
4227 	WARN_ON_ONCE(r == -EBUSY);
4228 	if (r)
4229 		goto unlock_vcpu_destroy;
4230 
4231 	/*
4232 	 * Now it's all set up, let userspace reach it.  Grab the vCPU's mutex
4233 	 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
4234 	 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
4235 	 * into a NULL-pointer dereference because KVM thinks the _current_
4236 	 * vCPU doesn't exist.  As a bonus, taking vcpu->mutex ensures lockdep
4237 	 * knows it's taken *inside* kvm->lock.
4238 	 */
4239 	mutex_lock(&vcpu->mutex);
4240 	kvm_get_kvm(kvm);
4241 	r = create_vcpu_fd(vcpu);
4242 	if (r < 0)
4243 		goto kvm_put_xa_erase;
4244 
4245 	/*
4246 	 * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
4247 	 * pointer before the incremented value of kvm->online_vcpus.
4248 	 */
4249 	smp_wmb();
4250 	atomic_inc(&kvm->online_vcpus);
4251 	mutex_unlock(&vcpu->mutex);
4252 
4253 	mutex_unlock(&kvm->lock);
4254 	kvm_arch_vcpu_postcreate(vcpu);
4255 	kvm_create_vcpu_debugfs(vcpu);
4256 	return r;
4257 
4258 kvm_put_xa_erase:
4259 	mutex_unlock(&vcpu->mutex);
4260 	kvm_put_kvm_no_destroy(kvm);
4261 	xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
4262 unlock_vcpu_destroy:
4263 	mutex_unlock(&kvm->lock);
4264 	kvm_dirty_ring_free(&vcpu->dirty_ring);
4265 arch_vcpu_destroy:
4266 	kvm_arch_vcpu_destroy(vcpu);
4267 vcpu_free_run_page:
4268 	free_page((unsigned long)vcpu->run);
4269 vcpu_free:
4270 	kmem_cache_free(kvm_vcpu_cache, vcpu);
4271 vcpu_decrement:
4272 	mutex_lock(&kvm->lock);
4273 	kvm->created_vcpus--;
4274 	mutex_unlock(&kvm->lock);
4275 	return r;
4276 }
4277 
4278 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4279 {
4280 	if (sigset) {
4281 		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4282 		vcpu->sigset_active = 1;
4283 		vcpu->sigset = *sigset;
4284 	} else
4285 		vcpu->sigset_active = 0;
4286 	return 0;
4287 }
4288 
4289 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4290 			      size_t size, loff_t *offset)
4291 {
4292 	struct kvm_vcpu *vcpu = file->private_data;
4293 
4294 	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4295 			&kvm_vcpu_stats_desc[0], &vcpu->stat,
4296 			sizeof(vcpu->stat), user_buffer, size, offset);
4297 }
4298 
4299 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4300 {
4301 	struct kvm_vcpu *vcpu = file->private_data;
4302 
4303 	kvm_put_kvm(vcpu->kvm);
4304 	return 0;
4305 }
4306 
4307 static const struct file_operations kvm_vcpu_stats_fops = {
4308 	.owner = THIS_MODULE,
4309 	.read = kvm_vcpu_stats_read,
4310 	.release = kvm_vcpu_stats_release,
4311 	.llseek = noop_llseek,
4312 };
4313 
4314 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4315 {
4316 	int fd;
4317 	struct file *file;
4318 	char name[15 + ITOA_MAX_LEN + 1];
4319 
4320 	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4321 
4322 	fd = get_unused_fd_flags(O_CLOEXEC);
4323 	if (fd < 0)
4324 		return fd;
4325 
4326 	file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu,
4327 					O_RDONLY, FMODE_PREAD);
4328 	if (IS_ERR(file)) {
4329 		put_unused_fd(fd);
4330 		return PTR_ERR(file);
4331 	}
4332 
4333 	kvm_get_kvm(vcpu->kvm);
4334 	fd_install(fd, file);
4335 
4336 	return fd;
4337 }
4338 
4339 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4340 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4341 				     struct kvm_pre_fault_memory *range)
4342 {
4343 	int idx;
4344 	long r;
4345 	u64 full_size;
4346 
4347 	if (range->flags)
4348 		return -EINVAL;
4349 
4350 	if (!PAGE_ALIGNED(range->gpa) ||
4351 	    !PAGE_ALIGNED(range->size) ||
4352 	    range->gpa + range->size <= range->gpa)
4353 		return -EINVAL;
4354 
4355 	vcpu_load(vcpu);
4356 	idx = srcu_read_lock(&vcpu->kvm->srcu);
4357 
4358 	full_size = range->size;
4359 	do {
4360 		if (signal_pending(current)) {
4361 			r = -EINTR;
4362 			break;
4363 		}
4364 
4365 		r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
4366 		if (WARN_ON_ONCE(r == 0 || r == -EIO))
4367 			break;
4368 
4369 		if (r < 0)
4370 			break;
4371 
4372 		range->size -= r;
4373 		range->gpa += r;
4374 		cond_resched();
4375 	} while (range->size);
4376 
4377 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
4378 	vcpu_put(vcpu);
4379 
4380 	/* Return success if at least one page was mapped successfully.  */
4381 	return full_size == range->size ? r : 0;
4382 }
4383 #endif
4384 
4385 static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
4386 {
4387 	struct kvm *kvm = vcpu->kvm;
4388 
4389 	/*
4390 	 * In practice, this happy path will always be taken, as a well-behaved
4391 	 * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns.
4392 	 */
4393 	if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus)))
4394 		return 0;
4395 
4396 	/*
4397 	 * Acquire and release the vCPU's mutex to wait for vCPU creation to
4398 	 * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU
4399 	 * is fully online).
4400 	 */
4401 	if (mutex_lock_killable(&vcpu->mutex))
4402 		return -EINTR;
4403 
4404 	mutex_unlock(&vcpu->mutex);
4405 
4406 	if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx)))
4407 		return -EIO;
4408 
4409 	return 0;
4410 }
4411 
4412 static long kvm_vcpu_ioctl(struct file *filp,
4413 			   unsigned int ioctl, unsigned long arg)
4414 {
4415 	struct kvm_vcpu *vcpu = filp->private_data;
4416 	void __user *argp = (void __user *)arg;
4417 	int r;
4418 	struct kvm_fpu *fpu = NULL;
4419 	struct kvm_sregs *kvm_sregs = NULL;
4420 
4421 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4422 		return -EIO;
4423 
4424 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4425 		return -EINVAL;
4426 
4427 	/*
4428 	 * Wait for the vCPU to be online before handling the ioctl(), as KVM
4429 	 * assumes the vCPU is reachable via vcpu_array, i.e. may dereference
4430 	 * a NULL pointer if userspace invokes an ioctl() before KVM is ready.
4431 	 */
4432 	r = kvm_wait_for_vcpu_online(vcpu);
4433 	if (r)
4434 		return r;
4435 
4436 	/*
4437 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4438 	 * execution; mutex_lock() would break them.
4439 	 */
4440 	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4441 	if (r != -ENOIOCTLCMD)
4442 		return r;
4443 
4444 	if (mutex_lock_killable(&vcpu->mutex))
4445 		return -EINTR;
4446 	switch (ioctl) {
4447 	case KVM_RUN: {
4448 		struct pid *oldpid;
4449 		r = -EINVAL;
4450 		if (arg)
4451 			goto out;
4452 
4453 		/*
4454 		 * Note, vcpu->pid is primarily protected by vcpu->mutex. The
4455 		 * dedicated r/w lock allows other tasks, e.g. other vCPUs, to
4456 		 * read vcpu->pid while this vCPU is in KVM_RUN, e.g. to yield
4457 		 * directly to this vCPU
4458 		 */
4459 		oldpid = vcpu->pid;
4460 		if (unlikely(oldpid != task_pid(current))) {
4461 			/* The thread running this VCPU changed. */
4462 			struct pid *newpid;
4463 
4464 			r = kvm_arch_vcpu_run_pid_change(vcpu);
4465 			if (r)
4466 				break;
4467 
4468 			newpid = get_task_pid(current, PIDTYPE_PID);
4469 			write_lock(&vcpu->pid_lock);
4470 			vcpu->pid = newpid;
4471 			write_unlock(&vcpu->pid_lock);
4472 
4473 			put_pid(oldpid);
4474 		}
4475 		vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
4476 		r = kvm_arch_vcpu_ioctl_run(vcpu);
4477 		vcpu->wants_to_run = false;
4478 
4479 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4480 		break;
4481 	}
4482 	case KVM_GET_REGS: {
4483 		struct kvm_regs *kvm_regs;
4484 
4485 		r = -ENOMEM;
4486 		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
4487 		if (!kvm_regs)
4488 			goto out;
4489 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4490 		if (r)
4491 			goto out_free1;
4492 		r = -EFAULT;
4493 		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4494 			goto out_free1;
4495 		r = 0;
4496 out_free1:
4497 		kfree(kvm_regs);
4498 		break;
4499 	}
4500 	case KVM_SET_REGS: {
4501 		struct kvm_regs *kvm_regs;
4502 
4503 		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4504 		if (IS_ERR(kvm_regs)) {
4505 			r = PTR_ERR(kvm_regs);
4506 			goto out;
4507 		}
4508 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4509 		kfree(kvm_regs);
4510 		break;
4511 	}
4512 	case KVM_GET_SREGS: {
4513 		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
4514 		r = -ENOMEM;
4515 		if (!kvm_sregs)
4516 			goto out;
4517 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4518 		if (r)
4519 			goto out;
4520 		r = -EFAULT;
4521 		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4522 			goto out;
4523 		r = 0;
4524 		break;
4525 	}
4526 	case KVM_SET_SREGS: {
4527 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4528 		if (IS_ERR(kvm_sregs)) {
4529 			r = PTR_ERR(kvm_sregs);
4530 			kvm_sregs = NULL;
4531 			goto out;
4532 		}
4533 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4534 		break;
4535 	}
4536 	case KVM_GET_MP_STATE: {
4537 		struct kvm_mp_state mp_state;
4538 
4539 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4540 		if (r)
4541 			goto out;
4542 		r = -EFAULT;
4543 		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4544 			goto out;
4545 		r = 0;
4546 		break;
4547 	}
4548 	case KVM_SET_MP_STATE: {
4549 		struct kvm_mp_state mp_state;
4550 
4551 		r = -EFAULT;
4552 		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4553 			goto out;
4554 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4555 		break;
4556 	}
4557 	case KVM_TRANSLATE: {
4558 		struct kvm_translation tr;
4559 
4560 		r = -EFAULT;
4561 		if (copy_from_user(&tr, argp, sizeof(tr)))
4562 			goto out;
4563 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4564 		if (r)
4565 			goto out;
4566 		r = -EFAULT;
4567 		if (copy_to_user(argp, &tr, sizeof(tr)))
4568 			goto out;
4569 		r = 0;
4570 		break;
4571 	}
4572 	case KVM_SET_GUEST_DEBUG: {
4573 		struct kvm_guest_debug dbg;
4574 
4575 		r = -EFAULT;
4576 		if (copy_from_user(&dbg, argp, sizeof(dbg)))
4577 			goto out;
4578 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4579 		break;
4580 	}
4581 	case KVM_SET_SIGNAL_MASK: {
4582 		struct kvm_signal_mask __user *sigmask_arg = argp;
4583 		struct kvm_signal_mask kvm_sigmask;
4584 		sigset_t sigset, *p;
4585 
4586 		p = NULL;
4587 		if (argp) {
4588 			r = -EFAULT;
4589 			if (copy_from_user(&kvm_sigmask, argp,
4590 					   sizeof(kvm_sigmask)))
4591 				goto out;
4592 			r = -EINVAL;
4593 			if (kvm_sigmask.len != sizeof(sigset))
4594 				goto out;
4595 			r = -EFAULT;
4596 			if (copy_from_user(&sigset, sigmask_arg->sigset,
4597 					   sizeof(sigset)))
4598 				goto out;
4599 			p = &sigset;
4600 		}
4601 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4602 		break;
4603 	}
4604 	case KVM_GET_FPU: {
4605 		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
4606 		r = -ENOMEM;
4607 		if (!fpu)
4608 			goto out;
4609 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4610 		if (r)
4611 			goto out;
4612 		r = -EFAULT;
4613 		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4614 			goto out;
4615 		r = 0;
4616 		break;
4617 	}
4618 	case KVM_SET_FPU: {
4619 		fpu = memdup_user(argp, sizeof(*fpu));
4620 		if (IS_ERR(fpu)) {
4621 			r = PTR_ERR(fpu);
4622 			fpu = NULL;
4623 			goto out;
4624 		}
4625 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4626 		break;
4627 	}
4628 	case KVM_GET_STATS_FD: {
4629 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4630 		break;
4631 	}
4632 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4633 	case KVM_PRE_FAULT_MEMORY: {
4634 		struct kvm_pre_fault_memory range;
4635 
4636 		r = -EFAULT;
4637 		if (copy_from_user(&range, argp, sizeof(range)))
4638 			break;
4639 		r = kvm_vcpu_pre_fault_memory(vcpu, &range);
4640 		/* Pass back leftover range. */
4641 		if (copy_to_user(argp, &range, sizeof(range)))
4642 			r = -EFAULT;
4643 		break;
4644 	}
4645 #endif
4646 	default:
4647 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4648 	}
4649 out:
4650 	mutex_unlock(&vcpu->mutex);
4651 	kfree(fpu);
4652 	kfree(kvm_sregs);
4653 	return r;
4654 }
4655 
4656 #ifdef CONFIG_KVM_COMPAT
4657 static long kvm_vcpu_compat_ioctl(struct file *filp,
4658 				  unsigned int ioctl, unsigned long arg)
4659 {
4660 	struct kvm_vcpu *vcpu = filp->private_data;
4661 	void __user *argp = compat_ptr(arg);
4662 	int r;
4663 
4664 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4665 		return -EIO;
4666 
4667 	switch (ioctl) {
4668 	case KVM_SET_SIGNAL_MASK: {
4669 		struct kvm_signal_mask __user *sigmask_arg = argp;
4670 		struct kvm_signal_mask kvm_sigmask;
4671 		sigset_t sigset;
4672 
4673 		if (argp) {
4674 			r = -EFAULT;
4675 			if (copy_from_user(&kvm_sigmask, argp,
4676 					   sizeof(kvm_sigmask)))
4677 				goto out;
4678 			r = -EINVAL;
4679 			if (kvm_sigmask.len != sizeof(compat_sigset_t))
4680 				goto out;
4681 			r = -EFAULT;
4682 			if (get_compat_sigset(&sigset,
4683 					      (compat_sigset_t __user *)sigmask_arg->sigset))
4684 				goto out;
4685 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4686 		} else
4687 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4688 		break;
4689 	}
4690 	default:
4691 		r = kvm_vcpu_ioctl(filp, ioctl, arg);
4692 	}
4693 
4694 out:
4695 	return r;
4696 }
4697 #endif
4698 
4699 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4700 {
4701 	struct kvm_device *dev = filp->private_data;
4702 
4703 	if (dev->ops->mmap)
4704 		return dev->ops->mmap(dev, vma);
4705 
4706 	return -ENODEV;
4707 }
4708 
4709 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4710 				 int (*accessor)(struct kvm_device *dev,
4711 						 struct kvm_device_attr *attr),
4712 				 unsigned long arg)
4713 {
4714 	struct kvm_device_attr attr;
4715 
4716 	if (!accessor)
4717 		return -EPERM;
4718 
4719 	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4720 		return -EFAULT;
4721 
4722 	return accessor(dev, &attr);
4723 }
4724 
4725 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4726 			     unsigned long arg)
4727 {
4728 	struct kvm_device *dev = filp->private_data;
4729 
4730 	if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4731 		return -EIO;
4732 
4733 	switch (ioctl) {
4734 	case KVM_SET_DEVICE_ATTR:
4735 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4736 	case KVM_GET_DEVICE_ATTR:
4737 		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4738 	case KVM_HAS_DEVICE_ATTR:
4739 		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4740 	default:
4741 		if (dev->ops->ioctl)
4742 			return dev->ops->ioctl(dev, ioctl, arg);
4743 
4744 		return -ENOTTY;
4745 	}
4746 }
4747 
4748 static int kvm_device_release(struct inode *inode, struct file *filp)
4749 {
4750 	struct kvm_device *dev = filp->private_data;
4751 	struct kvm *kvm = dev->kvm;
4752 
4753 	if (dev->ops->release) {
4754 		mutex_lock(&kvm->lock);
4755 		list_del_rcu(&dev->vm_node);
4756 		synchronize_rcu();
4757 		dev->ops->release(dev);
4758 		mutex_unlock(&kvm->lock);
4759 	}
4760 
4761 	kvm_put_kvm(kvm);
4762 	return 0;
4763 }
4764 
4765 static struct file_operations kvm_device_fops = {
4766 	.unlocked_ioctl = kvm_device_ioctl,
4767 	.release = kvm_device_release,
4768 	KVM_COMPAT(kvm_device_ioctl),
4769 	.mmap = kvm_device_mmap,
4770 };
4771 
4772 struct kvm_device *kvm_device_from_filp(struct file *filp)
4773 {
4774 	if (filp->f_op != &kvm_device_fops)
4775 		return NULL;
4776 
4777 	return filp->private_data;
4778 }
4779 
4780 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4781 #ifdef CONFIG_KVM_MPIC
4782 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
4783 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
4784 #endif
4785 };
4786 
4787 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4788 {
4789 	if (type >= ARRAY_SIZE(kvm_device_ops_table))
4790 		return -ENOSPC;
4791 
4792 	if (kvm_device_ops_table[type] != NULL)
4793 		return -EEXIST;
4794 
4795 	kvm_device_ops_table[type] = ops;
4796 	return 0;
4797 }
4798 
4799 void kvm_unregister_device_ops(u32 type)
4800 {
4801 	if (kvm_device_ops_table[type] != NULL)
4802 		kvm_device_ops_table[type] = NULL;
4803 }
4804 
4805 static int kvm_ioctl_create_device(struct kvm *kvm,
4806 				   struct kvm_create_device *cd)
4807 {
4808 	const struct kvm_device_ops *ops;
4809 	struct kvm_device *dev;
4810 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4811 	int type;
4812 	int ret;
4813 
4814 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4815 		return -ENODEV;
4816 
4817 	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4818 	ops = kvm_device_ops_table[type];
4819 	if (ops == NULL)
4820 		return -ENODEV;
4821 
4822 	if (test)
4823 		return 0;
4824 
4825 	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4826 	if (!dev)
4827 		return -ENOMEM;
4828 
4829 	dev->ops = ops;
4830 	dev->kvm = kvm;
4831 
4832 	mutex_lock(&kvm->lock);
4833 	ret = ops->create(dev, type);
4834 	if (ret < 0) {
4835 		mutex_unlock(&kvm->lock);
4836 		kfree(dev);
4837 		return ret;
4838 	}
4839 	list_add_rcu(&dev->vm_node, &kvm->devices);
4840 	mutex_unlock(&kvm->lock);
4841 
4842 	if (ops->init)
4843 		ops->init(dev);
4844 
4845 	kvm_get_kvm(kvm);
4846 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4847 	if (ret < 0) {
4848 		kvm_put_kvm_no_destroy(kvm);
4849 		mutex_lock(&kvm->lock);
4850 		list_del_rcu(&dev->vm_node);
4851 		synchronize_rcu();
4852 		if (ops->release)
4853 			ops->release(dev);
4854 		mutex_unlock(&kvm->lock);
4855 		if (ops->destroy)
4856 			ops->destroy(dev);
4857 		return ret;
4858 	}
4859 
4860 	cd->fd = ret;
4861 	return 0;
4862 }
4863 
4864 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4865 {
4866 	switch (arg) {
4867 	case KVM_CAP_USER_MEMORY:
4868 	case KVM_CAP_USER_MEMORY2:
4869 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4870 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4871 	case KVM_CAP_INTERNAL_ERROR_DATA:
4872 #ifdef CONFIG_HAVE_KVM_MSI
4873 	case KVM_CAP_SIGNAL_MSI:
4874 #endif
4875 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4876 	case KVM_CAP_IRQFD:
4877 #endif
4878 	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4879 	case KVM_CAP_CHECK_EXTENSION_VM:
4880 	case KVM_CAP_ENABLE_CAP_VM:
4881 	case KVM_CAP_HALT_POLL:
4882 		return 1;
4883 #ifdef CONFIG_KVM_MMIO
4884 	case KVM_CAP_COALESCED_MMIO:
4885 		return KVM_COALESCED_MMIO_PAGE_OFFSET;
4886 	case KVM_CAP_COALESCED_PIO:
4887 		return 1;
4888 #endif
4889 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4890 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4891 		return KVM_DIRTY_LOG_MANUAL_CAPS;
4892 #endif
4893 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4894 	case KVM_CAP_IRQ_ROUTING:
4895 		return KVM_MAX_IRQ_ROUTES;
4896 #endif
4897 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4898 	case KVM_CAP_MULTI_ADDRESS_SPACE:
4899 		if (kvm)
4900 			return kvm_arch_nr_memslot_as_ids(kvm);
4901 		return KVM_MAX_NR_ADDRESS_SPACES;
4902 #endif
4903 	case KVM_CAP_NR_MEMSLOTS:
4904 		return KVM_USER_MEM_SLOTS;
4905 	case KVM_CAP_DIRTY_LOG_RING:
4906 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4907 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4908 #else
4909 		return 0;
4910 #endif
4911 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4912 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4913 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4914 #else
4915 		return 0;
4916 #endif
4917 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4918 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4919 #endif
4920 	case KVM_CAP_BINARY_STATS_FD:
4921 	case KVM_CAP_SYSTEM_EVENT_DATA:
4922 	case KVM_CAP_DEVICE_CTRL:
4923 		return 1;
4924 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4925 	case KVM_CAP_MEMORY_ATTRIBUTES:
4926 		return kvm_supported_mem_attributes(kvm);
4927 #endif
4928 #ifdef CONFIG_KVM_GUEST_MEMFD
4929 	case KVM_CAP_GUEST_MEMFD:
4930 		return 1;
4931 	case KVM_CAP_GUEST_MEMFD_MMAP:
4932 		return !kvm || kvm_arch_supports_gmem_mmap(kvm);
4933 #endif
4934 	default:
4935 		break;
4936 	}
4937 	return kvm_vm_ioctl_check_extension(kvm, arg);
4938 }
4939 
4940 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4941 {
4942 	int r;
4943 
4944 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4945 		return -EINVAL;
4946 
4947 	/* The size must be a power of 2. */
4948 	if (!size || (size & (size - 1)))
4949 		return -EINVAL;
4950 
4951 	/* Must be large enough to hold the reserved entries, and at least a page. */
4952 	if (size < kvm_dirty_ring_get_rsvd_entries(kvm) *
4953 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4954 		return -EINVAL;
4955 
4956 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4957 	    sizeof(struct kvm_dirty_gfn))
4958 		return -E2BIG;
4959 
4960 	/* We only allow the size to be set once. */
4961 	if (kvm->dirty_ring_size)
4962 		return -EINVAL;
4963 
4964 	mutex_lock(&kvm->lock);
4965 
4966 	if (kvm->created_vcpus) {
4967 		/* Changing this value is not allowed once vCPUs have been created. */
4968 		r = -EINVAL;
4969 	} else {
4970 		kvm->dirty_ring_size = size;
4971 		r = 0;
4972 	}
4973 
4974 	mutex_unlock(&kvm->lock);
4975 	return r;
4976 }
4977 
4978 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4979 {
4980 	unsigned long i;
4981 	struct kvm_vcpu *vcpu;
4982 	int cleared = 0, r;
4983 
4984 	if (!kvm->dirty_ring_size)
4985 		return -EINVAL;
4986 
4987 	mutex_lock(&kvm->slots_lock);
4988 
4989 	kvm_for_each_vcpu(i, vcpu, kvm) {
4990 		r = kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring, &cleared);
4991 		if (r)
4992 			break;
4993 	}
4994 
4995 	mutex_unlock(&kvm->slots_lock);
4996 
4997 	if (cleared)
4998 		kvm_flush_remote_tlbs(kvm);
4999 
5000 	return cleared;
5001 }
5002 
5003 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
5004 						  struct kvm_enable_cap *cap)
5005 {
5006 	return -EINVAL;
5007 }
5008 
5009 bool kvm_are_all_memslots_empty(struct kvm *kvm)
5010 {
5011 	int i;
5012 
5013 	lockdep_assert_held(&kvm->slots_lock);
5014 
5015 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
5016 		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
5017 			return false;
5018 	}
5019 
5020 	return true;
5021 }
5022 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_are_all_memslots_empty);
5023 
5024 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
5025 					   struct kvm_enable_cap *cap)
5026 {
5027 	switch (cap->cap) {
5028 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5029 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
5030 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
5031 
5032 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
5033 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
5034 
5035 		if (cap->flags || (cap->args[0] & ~allowed_options))
5036 			return -EINVAL;
5037 		kvm->manual_dirty_log_protect = cap->args[0];
5038 		return 0;
5039 	}
5040 #endif
5041 	case KVM_CAP_HALT_POLL: {
5042 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5043 			return -EINVAL;
5044 
5045 		kvm->max_halt_poll_ns = cap->args[0];
5046 
5047 		/*
5048 		 * Ensure kvm->override_halt_poll_ns does not become visible
5049 		 * before kvm->max_halt_poll_ns.
5050 		 *
5051 		 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5052 		 */
5053 		smp_wmb();
5054 		kvm->override_halt_poll_ns = true;
5055 
5056 		return 0;
5057 	}
5058 	case KVM_CAP_DIRTY_LOG_RING:
5059 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5060 		if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5061 			return -EINVAL;
5062 
5063 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5064 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5065 		int r = -EINVAL;
5066 
5067 		if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5068 		    !kvm->dirty_ring_size || cap->flags)
5069 			return r;
5070 
5071 		mutex_lock(&kvm->slots_lock);
5072 
5073 		/*
5074 		 * For simplicity, allow enabling ring+bitmap if and only if
5075 		 * there are no memslots, e.g. to ensure all memslots allocate
5076 		 * a bitmap after the capability is enabled.
5077 		 */
5078 		if (kvm_are_all_memslots_empty(kvm)) {
5079 			kvm->dirty_ring_with_bitmap = true;
5080 			r = 0;
5081 		}
5082 
5083 		mutex_unlock(&kvm->slots_lock);
5084 
5085 		return r;
5086 	}
5087 	default:
5088 		return kvm_vm_ioctl_enable_cap(kvm, cap);
5089 	}
5090 }
5091 
5092 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5093 			      size_t size, loff_t *offset)
5094 {
5095 	struct kvm *kvm = file->private_data;
5096 
5097 	return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5098 				&kvm_vm_stats_desc[0], &kvm->stat,
5099 				sizeof(kvm->stat), user_buffer, size, offset);
5100 }
5101 
5102 static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5103 {
5104 	struct kvm *kvm = file->private_data;
5105 
5106 	kvm_put_kvm(kvm);
5107 	return 0;
5108 }
5109 
5110 static const struct file_operations kvm_vm_stats_fops = {
5111 	.owner = THIS_MODULE,
5112 	.read = kvm_vm_stats_read,
5113 	.release = kvm_vm_stats_release,
5114 	.llseek = noop_llseek,
5115 };
5116 
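/*
 * Create a read-only file descriptor for the VM's binary stats.  The file
 * holds a reference to the VM that is dropped by kvm_vm_stats_release().
 */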
5117 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5118 {
5119 	int fd;
5120 	struct file *file;
5121 
5122 	fd = get_unused_fd_flags(O_CLOEXEC);
5123 	if (fd < 0)
5124 		return fd;
5125 
5126 	file = anon_inode_getfile_fmode("kvm-vm-stats",
5127 			&kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD);
5128 	if (IS_ERR(file)) {
5129 		put_unused_fd(fd);
5130 		return PTR_ERR(file);
5131 	}
5132 
5133 	kvm_get_kvm(kvm);
5134 	fd_install(fd, file);
5135 
5136 	return fd;
5137 }
5138 
5139 #define SANITY_CHECK_MEM_REGION_FIELD(field)					\
5140 do {										\
5141 	BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=		\
5142 		     offsetof(struct kvm_userspace_memory_region2, field));	\
5143 	BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) !=		\
5144 		     sizeof_field(struct kvm_userspace_memory_region2, field));	\
5145 } while (0)
5146 
5147 static long kvm_vm_ioctl(struct file *filp,
5148 			   unsigned int ioctl, unsigned long arg)
5149 {
5150 	struct kvm *kvm = filp->private_data;
5151 	void __user *argp = (void __user *)arg;
5152 	int r;
5153 
5154 	if (kvm->mm != current->mm || kvm->vm_dead)
5155 		return -EIO;
5156 	switch (ioctl) {
5157 	case KVM_CREATE_VCPU:
5158 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5159 		break;
5160 	case KVM_ENABLE_CAP: {
5161 		struct kvm_enable_cap cap;
5162 
5163 		r = -EFAULT;
5164 		if (copy_from_user(&cap, argp, sizeof(cap)))
5165 			goto out;
5166 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5167 		break;
5168 	}
5169 	case KVM_SET_USER_MEMORY_REGION2:
5170 	case KVM_SET_USER_MEMORY_REGION: {
5171 		struct kvm_userspace_memory_region2 mem;
5172 		unsigned long size;
5173 
5174 		if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5175 			/*
5176 			 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5177 			 * accessed, but avoid leaking kernel memory in case of a bug.
5178 			 */
5179 			memset(&mem, 0, sizeof(mem));
5180 			size = sizeof(struct kvm_userspace_memory_region);
5181 		} else {
5182 			size = sizeof(struct kvm_userspace_memory_region2);
5183 		}
5184 
5185 		/* Ensure the common parts of the two structs are identical. */
5186 		SANITY_CHECK_MEM_REGION_FIELD(slot);
5187 		SANITY_CHECK_MEM_REGION_FIELD(flags);
5188 		SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5189 		SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5190 		SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5191 
5192 		r = -EFAULT;
5193 		if (copy_from_user(&mem, argp, size))
5194 			goto out;
5195 
5196 		r = -EINVAL;
5197 		if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5198 		    (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5199 			goto out;
5200 
5201 		r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5202 		break;
5203 	}
5204 	case KVM_GET_DIRTY_LOG: {
5205 		struct kvm_dirty_log log;
5206 
5207 		r = -EFAULT;
5208 		if (copy_from_user(&log, argp, sizeof(log)))
5209 			goto out;
5210 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5211 		break;
5212 	}
5213 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5214 	case KVM_CLEAR_DIRTY_LOG: {
5215 		struct kvm_clear_dirty_log log;
5216 
5217 		r = -EFAULT;
5218 		if (copy_from_user(&log, argp, sizeof(log)))
5219 			goto out;
5220 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5221 		break;
5222 	}
5223 #endif
5224 #ifdef CONFIG_KVM_MMIO
5225 	case KVM_REGISTER_COALESCED_MMIO: {
5226 		struct kvm_coalesced_mmio_zone zone;
5227 
5228 		r = -EFAULT;
5229 		if (copy_from_user(&zone, argp, sizeof(zone)))
5230 			goto out;
5231 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5232 		break;
5233 	}
5234 	case KVM_UNREGISTER_COALESCED_MMIO: {
5235 		struct kvm_coalesced_mmio_zone zone;
5236 
5237 		r = -EFAULT;
5238 		if (copy_from_user(&zone, argp, sizeof(zone)))
5239 			goto out;
5240 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5241 		break;
5242 	}
5243 #endif
5244 	case KVM_IRQFD: {
5245 		struct kvm_irqfd data;
5246 
5247 		r = -EFAULT;
5248 		if (copy_from_user(&data, argp, sizeof(data)))
5249 			goto out;
5250 		r = kvm_irqfd(kvm, &data);
5251 		break;
5252 	}
5253 	case KVM_IOEVENTFD: {
5254 		struct kvm_ioeventfd data;
5255 
5256 		r = -EFAULT;
5257 		if (copy_from_user(&data, argp, sizeof(data)))
5258 			goto out;
5259 		r = kvm_ioeventfd(kvm, &data);
5260 		break;
5261 	}
5262 #ifdef CONFIG_HAVE_KVM_MSI
5263 	case KVM_SIGNAL_MSI: {
5264 		struct kvm_msi msi;
5265 
5266 		r = -EFAULT;
5267 		if (copy_from_user(&msi, argp, sizeof(msi)))
5268 			goto out;
5269 		r = kvm_send_userspace_msi(kvm, &msi);
5270 		break;
5271 	}
5272 #endif
5273 #ifdef __KVM_HAVE_IRQ_LINE
5274 	case KVM_IRQ_LINE_STATUS:
5275 	case KVM_IRQ_LINE: {
5276 		struct kvm_irq_level irq_event;
5277 
5278 		r = -EFAULT;
5279 		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5280 			goto out;
5281 
5282 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5283 					ioctl == KVM_IRQ_LINE_STATUS);
5284 		if (r)
5285 			goto out;
5286 
5287 		r = -EFAULT;
5288 		if (ioctl == KVM_IRQ_LINE_STATUS) {
5289 			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5290 				goto out;
5291 		}
5292 
5293 		r = 0;
5294 		break;
5295 	}
5296 #endif
5297 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5298 	case KVM_SET_GSI_ROUTING: {
5299 		struct kvm_irq_routing routing;
5300 		struct kvm_irq_routing __user *urouting;
5301 		struct kvm_irq_routing_entry *entries = NULL;
5302 
5303 		r = -EFAULT;
5304 		if (copy_from_user(&routing, argp, sizeof(routing)))
5305 			goto out;
5306 		r = -EINVAL;
5307 		if (!kvm_arch_can_set_irq_routing(kvm))
5308 			goto out;
5309 		if (routing.nr > KVM_MAX_IRQ_ROUTES)
5310 			goto out;
5311 		if (routing.flags)
5312 			goto out;
5313 		if (routing.nr) {
5314 			urouting = argp;
5315 			entries = vmemdup_array_user(urouting->entries,
5316 						     routing.nr, sizeof(*entries));
5317 			if (IS_ERR(entries)) {
5318 				r = PTR_ERR(entries);
5319 				goto out;
5320 			}
5321 		}
5322 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
5323 					routing.flags);
5324 		kvfree(entries);
5325 		break;
5326 	}
5327 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5328 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5329 	case KVM_SET_MEMORY_ATTRIBUTES: {
5330 		struct kvm_memory_attributes attrs;
5331 
5332 		r = -EFAULT;
5333 		if (copy_from_user(&attrs, argp, sizeof(attrs)))
5334 			goto out;
5335 
5336 		r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5337 		break;
5338 	}
5339 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5340 	case KVM_CREATE_DEVICE: {
5341 		struct kvm_create_device cd;
5342 
5343 		r = -EFAULT;
5344 		if (copy_from_user(&cd, argp, sizeof(cd)))
5345 			goto out;
5346 
5347 		r = kvm_ioctl_create_device(kvm, &cd);
5348 		if (r)
5349 			goto out;
5350 
5351 		r = -EFAULT;
5352 		if (copy_to_user(argp, &cd, sizeof(cd)))
5353 			goto out;
5354 
5355 		r = 0;
5356 		break;
5357 	}
5358 	case KVM_CHECK_EXTENSION:
5359 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5360 		break;
5361 	case KVM_RESET_DIRTY_RINGS:
5362 		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5363 		break;
5364 	case KVM_GET_STATS_FD:
5365 		r = kvm_vm_ioctl_get_stats_fd(kvm);
5366 		break;
5367 #ifdef CONFIG_KVM_GUEST_MEMFD
5368 	case KVM_CREATE_GUEST_MEMFD: {
5369 		struct kvm_create_guest_memfd guest_memfd;
5370 
5371 		r = -EFAULT;
5372 		if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5373 			goto out;
5374 
5375 		r = kvm_gmem_create(kvm, &guest_memfd);
5376 		break;
5377 	}
5378 #endif
5379 	default:
5380 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5381 	}
5382 out:
5383 	return r;
5384 }
5385 
5386 #ifdef CONFIG_KVM_COMPAT
5387 struct compat_kvm_dirty_log {
5388 	__u32 slot;
5389 	__u32 padding1;
5390 	union {
5391 		compat_uptr_t dirty_bitmap; /* one bit per page */
5392 		__u64 padding2;
5393 	};
5394 };
5395 
5396 struct compat_kvm_clear_dirty_log {
5397 	__u32 slot;
5398 	__u32 num_pages;
5399 	__u64 first_page;
5400 	union {
5401 		compat_uptr_t dirty_bitmap; /* one bit per page */
5402 		__u64 padding2;
5403 	};
5404 };
5405 
5406 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5407 				     unsigned long arg)
5408 {
5409 	return -ENOTTY;
5410 }
5411 
5412 static long kvm_vm_compat_ioctl(struct file *filp,
5413 			   unsigned int ioctl, unsigned long arg)
5414 {
5415 	struct kvm *kvm = filp->private_data;
5416 	int r;
5417 
5418 	if (kvm->mm != current->mm || kvm->vm_dead)
5419 		return -EIO;
5420 
5421 	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5422 	if (r != -ENOTTY)
5423 		return r;
5424 
5425 	switch (ioctl) {
5426 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5427 	case KVM_CLEAR_DIRTY_LOG: {
5428 		struct compat_kvm_clear_dirty_log compat_log;
5429 		struct kvm_clear_dirty_log log;
5430 
5431 		if (copy_from_user(&compat_log, (void __user *)arg,
5432 				   sizeof(compat_log)))
5433 			return -EFAULT;
5434 		log.slot	 = compat_log.slot;
5435 		log.num_pages	 = compat_log.num_pages;
5436 		log.first_page	 = compat_log.first_page;
5437 		log.padding2	 = compat_log.padding2;
5438 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5439 
5440 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5441 		break;
5442 	}
5443 #endif
5444 	case KVM_GET_DIRTY_LOG: {
5445 		struct compat_kvm_dirty_log compat_log;
5446 		struct kvm_dirty_log log;
5447 
5448 		if (copy_from_user(&compat_log, (void __user *)arg,
5449 				   sizeof(compat_log)))
5450 			return -EFAULT;
5451 		log.slot	 = compat_log.slot;
5452 		log.padding1	 = compat_log.padding1;
5453 		log.padding2	 = compat_log.padding2;
5454 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5455 
5456 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5457 		break;
5458 	}
5459 	default:
5460 		r = kvm_vm_ioctl(filp, ioctl, arg);
5461 	}
5462 	return r;
5463 }
5464 #endif
5465 
5466 static struct file_operations kvm_vm_fops = {
5467 	.release        = kvm_vm_release,
5468 	.unlocked_ioctl = kvm_vm_ioctl,
5469 	.llseek		= noop_llseek,
5470 	KVM_COMPAT(kvm_vm_compat_ioctl),
5471 };
5472 
5473 bool file_is_kvm(struct file *file)
5474 {
5475 	return file && file->f_op == &kvm_vm_fops;
5476 }
5477 EXPORT_SYMBOL_FOR_KVM_INTERNAL(file_is_kvm);
5478 
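/*
 * KVM_CREATE_VM: reserve the fd up front so its number can be used when
 * naming the VM's debugfs directory, then hand ownership of the kvm
 * reference to the installed file; the final fput() releases it via
 * kvm_vm_release().
 */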
5479 static int kvm_dev_ioctl_create_vm(unsigned long type)
5480 {
5481 	char fdname[ITOA_MAX_LEN + 1];
5482 	int r, fd;
5483 	struct kvm *kvm;
5484 	struct file *file;
5485 
5486 	fd = get_unused_fd_flags(O_CLOEXEC);
5487 	if (fd < 0)
5488 		return fd;
5489 
5490 	snprintf(fdname, sizeof(fdname), "%d", fd);
5491 
5492 	kvm = kvm_create_vm(type, fdname);
5493 	if (IS_ERR(kvm)) {
5494 		r = PTR_ERR(kvm);
5495 		goto put_fd;
5496 	}
5497 
5498 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5499 	if (IS_ERR(file)) {
5500 		r = PTR_ERR(file);
5501 		goto put_kvm;
5502 	}
5503 
5504 	/*
5505 	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5506 	 * already set, with ->release() being kvm_vm_release().  In error
5507 	 * cases it will be called by the final fput(file) and will take
5508 	 * care of doing kvm_put_kvm(kvm).
5509 	 */
5510 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5511 
5512 	fd_install(fd, file);
5513 	return fd;
5514 
5515 put_kvm:
5516 	kvm_put_kvm(kvm);
5517 put_fd:
5518 	put_unused_fd(fd);
5519 	return r;
5520 }
5521 
5522 static long kvm_dev_ioctl(struct file *filp,
5523 			  unsigned int ioctl, unsigned long arg)
5524 {
5525 	int r = -EINVAL;
5526 
5527 	switch (ioctl) {
5528 	case KVM_GET_API_VERSION:
5529 		if (arg)
5530 			goto out;
5531 		r = KVM_API_VERSION;
5532 		break;
5533 	case KVM_CREATE_VM:
5534 		r = kvm_dev_ioctl_create_vm(arg);
5535 		break;
5536 	case KVM_CHECK_EXTENSION:
5537 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5538 		break;
5539 	case KVM_GET_VCPU_MMAP_SIZE:
5540 		if (arg)
5541 			goto out;
5542 		r = PAGE_SIZE;     /* struct kvm_run */
5543 #ifdef CONFIG_X86
5544 		r += PAGE_SIZE;    /* pio data page */
5545 #endif
5546 #ifdef CONFIG_KVM_MMIO
5547 		r += PAGE_SIZE;    /* coalesced mmio ring page */
5548 #endif
5549 		break;
5550 	default:
5551 		return kvm_arch_dev_ioctl(filp, ioctl, arg);
5552 	}
5553 out:
5554 	return r;
5555 }
5556 
5557 static struct file_operations kvm_chardev_ops = {
5558 	.unlocked_ioctl = kvm_dev_ioctl,
5559 	.llseek		= noop_llseek,
5560 	KVM_COMPAT(kvm_dev_ioctl),
5561 };
5562 
5563 static struct miscdevice kvm_dev = {
5564 	KVM_MINOR,
5565 	"kvm",
5566 	&kvm_chardev_ops,
5567 };
5568 
5569 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5570 bool enable_virt_at_load = true;
5571 module_param(enable_virt_at_load, bool, 0444);
5572 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
5573 
5574 __visible bool kvm_rebooting;
5575 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
5576 
5577 static DEFINE_PER_CPU(bool, virtualization_enabled);
5578 static DEFINE_MUTEX(kvm_usage_lock);
5579 static int kvm_usage_count;
5580 
5581 __weak void kvm_arch_enable_virtualization(void)
5582 {
5583 
5584 }
5585 
5586 __weak void kvm_arch_disable_virtualization(void)
5587 {
5588 
5589 }
5590 
5591 static int kvm_enable_virtualization_cpu(void)
5592 {
5593 	if (__this_cpu_read(virtualization_enabled))
5594 		return 0;
5595 
5596 	if (kvm_arch_enable_virtualization_cpu()) {
5597 		pr_info("kvm: enabling virtualization on CPU%d failed\n",
5598 			raw_smp_processor_id());
5599 		return -EIO;
5600 	}
5601 
5602 	__this_cpu_write(virtualization_enabled, true);
5603 	return 0;
5604 }
5605 
5606 static int kvm_online_cpu(unsigned int cpu)
5607 {
5608 	/*
5609 	 * Abort the CPU online process if hardware virtualization cannot
5610 	 * be enabled. Otherwise running VMs would encounter unrecoverable
5611 	 * errors when scheduled to this CPU.
5612 	 */
5613 	return kvm_enable_virtualization_cpu();
5614 }
5615 
5616 static void kvm_disable_virtualization_cpu(void *ign)
5617 {
5618 	if (!__this_cpu_read(virtualization_enabled))
5619 		return;
5620 
5621 	kvm_arch_disable_virtualization_cpu();
5622 
5623 	__this_cpu_write(virtualization_enabled, false);
5624 }
5625 
5626 static int kvm_offline_cpu(unsigned int cpu)
5627 {
5628 	kvm_disable_virtualization_cpu(NULL);
5629 	return 0;
5630 }
5631 
5632 static void kvm_shutdown(void)
5633 {
5634 	/*
5635 	 * Disable hardware virtualization and set kvm_rebooting to indicate
5636 	 * that KVM has asynchronously disabled hardware virtualization, i.e.
5637 	 * that relevant errors and exceptions aren't entirely unexpected.
5638 	 * Some flavors of hardware virtualization need to be disabled before
5639 	 * transferring control to firmware (to perform shutdown/reboot), e.g.
5640 	 * on x86, virtualization can block INIT interrupts, which are used by
5641 	 * firmware to pull APs back under firmware control.  Note, this path
5642 	 * is used for both shutdown and reboot scenarios, i.e. neither name is
5643 	 * 100% comprehensive.
5644 	 */
5645 	pr_info("kvm: exiting hardware virtualization\n");
5646 	kvm_rebooting = true;
5647 	on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
5648 }
5649 
5650 static int kvm_suspend(void)
5651 {
5652 	/*
5653 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5654 	 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
5655 	 * count is stable.  Assert that kvm_usage_lock is not held to ensure
5656 	 * the system isn't suspended while KVM is enabling hardware.  Hardware
5657 	 * enabling can be preempted, but the task cannot be frozen until it has
5658 	 * dropped all locks (userspace tasks are frozen via a fake signal).
5659 	 */
5660 	lockdep_assert_not_held(&kvm_usage_lock);
5661 	lockdep_assert_irqs_disabled();
5662 
5663 	kvm_disable_virtualization_cpu(NULL);
5664 	return 0;
5665 }
5666 
5667 static void kvm_resume(void)
5668 {
5669 	lockdep_assert_not_held(&kvm_usage_lock);
5670 	lockdep_assert_irqs_disabled();
5671 
5672 	WARN_ON_ONCE(kvm_enable_virtualization_cpu());
5673 }
5674 
5675 static struct syscore_ops kvm_syscore_ops = {
5676 	.suspend = kvm_suspend,
5677 	.resume = kvm_resume,
5678 	.shutdown = kvm_shutdown,
5679 };
5680 
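/*
 * Enable hardware virtualization, refcounted by kvm_usage_count under
 * kvm_usage_lock.  The first caller installs the CPU hotplug callbacks
 * (which enable virtualization on every online CPU) and registers the
 * syscore ops; enabling is undone if the system is already halting,
 * powering off or restarting.
 */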
5681 int kvm_enable_virtualization(void)
5682 {
5683 	int r;
5684 
5685 	guard(mutex)(&kvm_usage_lock);
5686 
5687 	if (kvm_usage_count++)
5688 		return 0;
5689 
5690 	kvm_arch_enable_virtualization();
5691 
5692 	r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
5693 			      kvm_online_cpu, kvm_offline_cpu);
5694 	if (r)
5695 		goto err_cpuhp;
5696 
5697 	register_syscore_ops(&kvm_syscore_ops);
5698 
5699 	/*
5700 	 * Undo virtualization enabling and bail if the system is going down.
5701 	 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5702 	 * possible for an in-flight operation to enable virtualization after
5703 	 * syscore_shutdown() is called, i.e. without kvm_shutdown() being
5704 	 * invoked.  Note, this relies on system_state being set _before_
5705 	 * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
5706 	 * or this CPU observes the impending shutdown.  Which is why KVM uses
5707 	 * a syscore ops hook instead of registering a dedicated reboot
5708 	 * notifier (the latter runs before system_state is updated).
5709 	 */
5710 	if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5711 	    system_state == SYSTEM_RESTART) {
5712 		r = -EBUSY;
5713 		goto err_rebooting;
5714 	}
5715 
5716 	return 0;
5717 
5718 err_rebooting:
5719 	unregister_syscore_ops(&kvm_syscore_ops);
5720 	cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5721 err_cpuhp:
5722 	kvm_arch_disable_virtualization();
5723 	--kvm_usage_count;
5724 	return r;
5725 }
5726 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_virtualization);
5727 
5728 void kvm_disable_virtualization(void)
5729 {
5730 	guard(mutex)(&kvm_usage_lock);
5731 
5732 	if (--kvm_usage_count)
5733 		return;
5734 
5735 	unregister_syscore_ops(&kvm_syscore_ops);
5736 	cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
5737 	kvm_arch_disable_virtualization();
5738 }
5739 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_disable_virtualization);
5740 
5741 static int kvm_init_virtualization(void)
5742 {
5743 	if (enable_virt_at_load)
5744 		return kvm_enable_virtualization();
5745 
5746 	return 0;
5747 }
5748 
5749 static void kvm_uninit_virtualization(void)
5750 {
5751 	if (enable_virt_at_load)
5752 		kvm_disable_virtualization();
5753 }
5754 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5755 static int kvm_init_virtualization(void)
5756 {
5757 	return 0;
5758 }
5759 
5760 static void kvm_uninit_virtualization(void)
5761 {
5762 
5763 }
5764 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5765 
5766 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5767 {
5768 	if (dev->ops->destructor)
5769 		dev->ops->destructor(dev);
5770 }
5771 
5772 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5773 {
5774 	int i;
5775 
5776 	for (i = 0; i < bus->dev_count; i++) {
5777 		struct kvm_io_device *pos = bus->range[i].dev;
5778 
5779 		kvm_iodevice_destructor(pos);
5780 	}
5781 	kfree(bus);
5782 }
5783 
5784 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5785 				 const struct kvm_io_range *r2)
5786 {
5787 	gpa_t addr1 = r1->addr;
5788 	gpa_t addr2 = r2->addr;
5789 
5790 	if (addr1 < addr2)
5791 		return -1;
5792 
5793 	/* If r2->len == 0, match the exact address.  If r2->len != 0,
5794 	 * accept any overlapping write.  Any order is acceptable for
5795 	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5796 	 * we process all of them.
5797 	 */
5798 	if (r2->len) {
5799 		addr1 += r1->len;
5800 		addr2 += r2->len;
5801 	}
5802 
5803 	if (addr1 > addr2)
5804 		return 1;
5805 
5806 	return 0;
5807 }
5808 
5809 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5810 {
5811 	return kvm_io_bus_cmp(p1, p2);
5812 }
5813 
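/*
 * Binary search the bus for the lowest-indexed range that matches
 * [addr, addr + len).  Returns that index, or -ENOENT if no device on the
 * bus handles the address.
 */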
5814 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5815 			     gpa_t addr, int len)
5816 {
5817 	struct kvm_io_range *range, key;
5818 	int off;
5819 
5820 	key = (struct kvm_io_range) {
5821 		.addr = addr,
5822 		.len = len,
5823 	};
5824 
5825 	range = bsearch(&key, bus->range, bus->dev_count,
5826 			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5827 	if (range == NULL)
5828 		return -ENOENT;
5829 
5830 	off = range - bus->range;
5831 
5832 	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5833 		off--;
5834 
5835 	return off;
5836 }
5837 
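/*
 * Offer the write to each device whose range overlaps the access, in order,
 * until one accepts it.  Returns the index of the accepting device, or
 * -EOPNOTSUPP if no device claims the write.
 */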
5838 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5839 			      struct kvm_io_range *range, const void *val)
5840 {
5841 	int idx;
5842 
5843 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5844 	if (idx < 0)
5845 		return -EOPNOTSUPP;
5846 
5847 	while (idx < bus->dev_count &&
5848 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5849 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5850 					range->len, val))
5851 			return idx;
5852 		idx++;
5853 	}
5854 
5855 	return -EOPNOTSUPP;
5856 }
5857 
5858 static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
5859 {
5860 	/*
5861 	 * Ensure that any updates to kvm_buses[] observed by the previous vCPU
5862 	 * machine instruction are also visible to the vCPU machine instruction
5863 	 * that triggered this call.
5864 	 */
5865 	smp_mb__after_srcu_read_lock();
5866 
5867 	return srcu_dereference(kvm->buses[idx], &kvm->srcu);
5868 }
5869 
5870 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5871 		     int len, const void *val)
5872 {
5873 	struct kvm_io_bus *bus;
5874 	struct kvm_io_range range;
5875 	int r;
5876 
5877 	range = (struct kvm_io_range) {
5878 		.addr = addr,
5879 		.len = len,
5880 	};
5881 
5882 	bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5883 	if (!bus)
5884 		return -ENOMEM;
5885 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
5886 	return r < 0 ? r : 0;
5887 }
5888 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_write);
5889 
5890 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5891 			    gpa_t addr, int len, const void *val, long cookie)
5892 {
5893 	struct kvm_io_bus *bus;
5894 	struct kvm_io_range range;
5895 
5896 	range = (struct kvm_io_range) {
5897 		.addr = addr,
5898 		.len = len,
5899 	};
5900 
5901 	bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5902 	if (!bus)
5903 		return -ENOMEM;
5904 
5905 	/* First try the device referenced by cookie. */
5906 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
5907 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5908 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5909 					val))
5910 			return cookie;
5911 
5912 	/*
5913 	 * cookie contained garbage; fall back to search and return the
5914 	 * correct cookie value.
5915 	 */
5916 	return __kvm_io_bus_write(vcpu, bus, &range, val);
5917 }
5918 
5919 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5920 			     struct kvm_io_range *range, void *val)
5921 {
5922 	int idx;
5923 
5924 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5925 	if (idx < 0)
5926 		return -EOPNOTSUPP;
5927 
5928 	while (idx < bus->dev_count &&
5929 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5930 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5931 				       range->len, val))
5932 			return idx;
5933 		idx++;
5934 	}
5935 
5936 	return -EOPNOTSUPP;
5937 }
5938 
5939 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5940 		    int len, void *val)
5941 {
5942 	struct kvm_io_bus *bus;
5943 	struct kvm_io_range range;
5944 	int r;
5945 
5946 	range = (struct kvm_io_range) {
5947 		.addr = addr,
5948 		.len = len,
5949 	};
5950 
5951 	bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
5952 	if (!bus)
5953 		return -ENOMEM;
5954 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
5955 	return r < 0 ? r : 0;
5956 }
5957 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_read);
5958 
5959 static void __free_bus(struct rcu_head *rcu)
5960 {
5961 	struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu);
5962 
5963 	kfree(bus);
5964 }
5965 
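/*
 * Register a device on a bus.  Callers must hold slots_lock.  A larger copy
 * of the bus is built with the new range inserted in sorted order, published
 * with rcu_assign_pointer(), and the old bus is freed after an SRCU grace
 * period.
 */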
5966 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5967 			    int len, struct kvm_io_device *dev)
5968 {
5969 	int i;
5970 	struct kvm_io_bus *new_bus, *bus;
5971 	struct kvm_io_range range;
5972 
5973 	lockdep_assert_held(&kvm->slots_lock);
5974 
5975 	bus = kvm_get_bus(kvm, bus_idx);
5976 	if (!bus)
5977 		return -ENOMEM;
5978 
5979 	/* exclude ioeventfd which is limited by maximum fd */
5980 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5981 		return -ENOSPC;
5982 
5983 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5984 			  GFP_KERNEL_ACCOUNT);
5985 	if (!new_bus)
5986 		return -ENOMEM;
5987 
5988 	range = (struct kvm_io_range) {
5989 		.addr = addr,
5990 		.len = len,
5991 		.dev = dev,
5992 	};
5993 
5994 	for (i = 0; i < bus->dev_count; i++)
5995 		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5996 			break;
5997 
5998 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5999 	new_bus->dev_count++;
6000 	new_bus->range[i] = range;
6001 	memcpy(new_bus->range + i + 1, bus->range + i,
6002 		(bus->dev_count - i) * sizeof(struct kvm_io_range));
6003 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6004 	call_srcu(&kvm->srcu, &bus->rcu, __free_bus);
6005 
6006 	return 0;
6007 }
6008 
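/*
 * Unregister a device from a bus.  Callers must hold slots_lock.  If the
 * shrunken replacement bus cannot be allocated, the old bus and all of its
 * devices are destroyed and -ENOMEM is returned.
 */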
6009 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6010 			      struct kvm_io_device *dev)
6011 {
6012 	int i;
6013 	struct kvm_io_bus *new_bus, *bus;
6014 
6015 	lockdep_assert_held(&kvm->slots_lock);
6016 
6017 	bus = kvm_get_bus(kvm, bus_idx);
6018 	if (!bus)
6019 		return 0;
6020 
6021 	for (i = 0; i < bus->dev_count; i++) {
6022 		if (bus->range[i].dev == dev) {
6023 			break;
6024 		}
6025 	}
6026 
6027 	if (i == bus->dev_count)
6028 		return 0;
6029 
6030 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
6031 			  GFP_KERNEL_ACCOUNT);
6032 	if (new_bus) {
6033 		memcpy(new_bus, bus, struct_size(bus, range, i));
6034 		new_bus->dev_count--;
6035 		memcpy(new_bus->range + i, bus->range + i + 1,
6036 				flex_array_size(new_bus, range, new_bus->dev_count - i));
6037 	}
6038 
6039 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6040 	synchronize_srcu_expedited(&kvm->srcu);
6041 
6042 	/*
6043 	 * If NULL bus is installed, destroy the old bus, including all the
6044 	 * attached devices. Otherwise, destroy the caller's device only.
6045 	 */
6046 	if (!new_bus) {
6047 		pr_err("kvm: failed to shrink bus, removing it completely\n");
6048 		kvm_io_bus_destroy(bus);
6049 		return -ENOMEM;
6050 	}
6051 
6052 	kvm_iodevice_destructor(dev);
6053 	kfree(bus);
6054 	return 0;
6055 }
6056 
6057 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6058 					 gpa_t addr)
6059 {
6060 	struct kvm_io_bus *bus;
6061 	int dev_idx, srcu_idx;
6062 	struct kvm_io_device *iodev = NULL;
6063 
6064 	srcu_idx = srcu_read_lock(&kvm->srcu);
6065 
6066 	bus = kvm_get_bus_srcu(kvm, bus_idx);
6067 	if (!bus)
6068 		goto out_unlock;
6069 
6070 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6071 	if (dev_idx < 0)
6072 		goto out_unlock;
6073 
6074 	iodev = bus->range[dev_idx].dev;
6075 
6076 out_unlock:
6077 	srcu_read_unlock(&kvm->srcu, srcu_idx);
6078 
6079 	return iodev;
6080 }
6081 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_get_dev);
6082 
6083 static int kvm_debugfs_open(struct inode *inode, struct file *file,
6084 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
6085 			   const char *fmt)
6086 {
6087 	int ret;
6088 	struct kvm_stat_data *stat_data = inode->i_private;
6089 
6090 	/*
6091 	 * The debugfs files hold a reference to the kvm struct, which
6092 	 * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe()
6093 	 * avoids the race between open and the removal of the debugfs directory.
6094 	 */
6095 	if (!kvm_get_kvm_safe(stat_data->kvm))
6096 		return -ENOENT;
6097 
6098 	ret = simple_attr_open(inode, file, get,
6099 			       kvm_stats_debugfs_mode(stat_data->desc) & 0222
6100 			       ? set : NULL, fmt);
6101 	if (ret)
6102 		kvm_put_kvm(stat_data->kvm);
6103 
6104 	return ret;
6105 }
6106 
6107 static int kvm_debugfs_release(struct inode *inode, struct file *file)
6108 {
6109 	struct kvm_stat_data *stat_data = inode->i_private;
6110 
6111 	simple_attr_release(inode, file);
6112 	kvm_put_kvm(stat_data->kvm);
6113 
6114 	return 0;
6115 }
6116 
6117 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6118 {
6119 	*val = *(u64 *)((void *)(&kvm->stat) + offset);
6120 
6121 	return 0;
6122 }
6123 
6124 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6125 {
6126 	*(u64 *)((void *)(&kvm->stat) + offset) = 0;
6127 
6128 	return 0;
6129 }
6130 
6131 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6132 {
6133 	unsigned long i;
6134 	struct kvm_vcpu *vcpu;
6135 
6136 	*val = 0;
6137 
6138 	kvm_for_each_vcpu(i, vcpu, kvm)
6139 		*val += *(u64 *)((void *)(&vcpu->stat) + offset);
6140 
6141 	return 0;
6142 }
6143 
6144 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6145 {
6146 	unsigned long i;
6147 	struct kvm_vcpu *vcpu;
6148 
6149 	kvm_for_each_vcpu(i, vcpu, kvm)
6150 		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6151 
6152 	return 0;
6153 }
6154 
6155 static int kvm_stat_data_get(void *data, u64 *val)
6156 {
6157 	int r = -EFAULT;
6158 	struct kvm_stat_data *stat_data = data;
6159 
6160 	switch (stat_data->kind) {
6161 	case KVM_STAT_VM:
6162 		r = kvm_get_stat_per_vm(stat_data->kvm,
6163 					stat_data->desc->desc.offset, val);
6164 		break;
6165 	case KVM_STAT_VCPU:
6166 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
6167 					  stat_data->desc->desc.offset, val);
6168 		break;
6169 	}
6170 
6171 	return r;
6172 }
6173 
6174 static int kvm_stat_data_clear(void *data, u64 val)
6175 {
6176 	int r = -EFAULT;
6177 	struct kvm_stat_data *stat_data = data;
6178 
6179 	if (val)
6180 		return -EINVAL;
6181 
6182 	switch (stat_data->kind) {
6183 	case KVM_STAT_VM:
6184 		r = kvm_clear_stat_per_vm(stat_data->kvm,
6185 					  stat_data->desc->desc.offset);
6186 		break;
6187 	case KVM_STAT_VCPU:
6188 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6189 					    stat_data->desc->desc.offset);
6190 		break;
6191 	}
6192 
6193 	return r;
6194 }
6195 
6196 static int kvm_stat_data_open(struct inode *inode, struct file *file)
6197 {
6198 	__simple_attr_check_format("%llu\n", 0ull);
6199 	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6200 				kvm_stat_data_clear, "%llu\n");
6201 }
6202 
6203 static const struct file_operations stat_fops_per_vm = {
6204 	.owner = THIS_MODULE,
6205 	.open = kvm_stat_data_open,
6206 	.release = kvm_debugfs_release,
6207 	.read = simple_attr_read,
6208 	.write = simple_attr_write,
6209 };
6210 
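/* Sum a per-VM stat at the given offset across every VM on vm_list. */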
6211 static int vm_stat_get(void *_offset, u64 *val)
6212 {
6213 	unsigned offset = (long)_offset;
6214 	struct kvm *kvm;
6215 	u64 tmp_val;
6216 
6217 	*val = 0;
6218 	mutex_lock(&kvm_lock);
6219 	list_for_each_entry(kvm, &vm_list, vm_list) {
6220 		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6221 		*val += tmp_val;
6222 	}
6223 	mutex_unlock(&kvm_lock);
6224 	return 0;
6225 }
6226 
6227 static int vm_stat_clear(void *_offset, u64 val)
6228 {
6229 	unsigned offset = (long)_offset;
6230 	struct kvm *kvm;
6231 
6232 	if (val)
6233 		return -EINVAL;
6234 
6235 	mutex_lock(&kvm_lock);
6236 	list_for_each_entry(kvm, &vm_list, vm_list) {
6237 		kvm_clear_stat_per_vm(kvm, offset);
6238 	}
6239 	mutex_unlock(&kvm_lock);
6240 
6241 	return 0;
6242 }
6243 
6244 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6245 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6246 
6247 static int vcpu_stat_get(void *_offset, u64 *val)
6248 {
6249 	unsigned offset = (long)_offset;
6250 	struct kvm *kvm;
6251 	u64 tmp_val;
6252 
6253 	*val = 0;
6254 	mutex_lock(&kvm_lock);
6255 	list_for_each_entry(kvm, &vm_list, vm_list) {
6256 		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6257 		*val += tmp_val;
6258 	}
6259 	mutex_unlock(&kvm_lock);
6260 	return 0;
6261 }
6262 
6263 static int vcpu_stat_clear(void *_offset, u64 val)
6264 {
6265 	unsigned offset = (long)_offset;
6266 	struct kvm *kvm;
6267 
6268 	if (val)
6269 		return -EINVAL;
6270 
6271 	mutex_lock(&kvm_lock);
6272 	list_for_each_entry(kvm, &vm_list, vm_list) {
6273 		kvm_clear_stat_per_vcpu(kvm, offset);
6274 	}
6275 	mutex_unlock(&kvm_lock);
6276 
6277 	return 0;
6278 }
6279 
6280 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6281 			"%llu\n");
6282 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6283 
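/*
 * Emit a KOBJ_CHANGE uevent on the kvm device when a VM is created or
 * destroyed, carrying the running counts of created and active VMs, the
 * owning PID, and the VM's debugfs stats path.
 */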
6284 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6285 {
6286 	struct kobj_uevent_env *env;
6287 	unsigned long long created, active;
6288 
6289 	if (!kvm_dev.this_device || !kvm)
6290 		return;
6291 
6292 	mutex_lock(&kvm_lock);
6293 	if (type == KVM_EVENT_CREATE_VM) {
6294 		kvm_createvm_count++;
6295 		kvm_active_vms++;
6296 	} else if (type == KVM_EVENT_DESTROY_VM) {
6297 		kvm_active_vms--;
6298 	}
6299 	created = kvm_createvm_count;
6300 	active = kvm_active_vms;
6301 	mutex_unlock(&kvm_lock);
6302 
6303 	env = kzalloc(sizeof(*env), GFP_KERNEL);
6304 	if (!env)
6305 		return;
6306 
6307 	add_uevent_var(env, "CREATED=%llu", created);
6308 	add_uevent_var(env, "COUNT=%llu", active);
6309 
6310 	if (type == KVM_EVENT_CREATE_VM) {
6311 		add_uevent_var(env, "EVENT=create");
6312 		kvm->userspace_pid = task_pid_nr(current);
6313 	} else if (type == KVM_EVENT_DESTROY_VM) {
6314 		add_uevent_var(env, "EVENT=destroy");
6315 	}
6316 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6317 
6318 	if (!IS_ERR(kvm->debugfs_dentry)) {
6319 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
6320 
6321 		if (p) {
6322 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6323 			if (!IS_ERR(tmp))
6324 				add_uevent_var(env, "STATS_PATH=%s", tmp);
6325 			kfree(p);
6326 		}
6327 	}
6328 	/* No need for bounds checks, since we add at most 5 keys */
6329 	env->envp[env->envp_idx++] = NULL;
6330 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6331 	kfree(env);
6332 }
6333 
6334 static void kvm_init_debug(void)
6335 {
6336 	const struct file_operations *fops;
6337 	const struct _kvm_stats_desc *pdesc;
6338 	int i;
6339 
6340 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6341 
6342 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6343 		pdesc = &kvm_vm_stats_desc[i];
6344 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6345 			fops = &vm_stat_fops;
6346 		else
6347 			fops = &vm_stat_readonly_fops;
6348 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6349 				kvm_debugfs_dir,
6350 				(void *)(long)pdesc->desc.offset, fops);
6351 	}
6352 
6353 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6354 		pdesc = &kvm_vcpu_stats_desc[i];
6355 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6356 			fops = &vcpu_stat_fops;
6357 		else
6358 			fops = &vcpu_stat_readonly_fops;
6359 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6360 				kvm_debugfs_dir,
6361 				(void *)(long)pdesc->desc.offset, fops);
6362 	}
6363 }
6364 
6365 static inline
6366 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6367 {
6368 	return container_of(pn, struct kvm_vcpu, preempt_notifier);
6369 }
6370 
6371 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6372 {
6373 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6374 
6375 	WRITE_ONCE(vcpu->preempted, false);
6376 	WRITE_ONCE(vcpu->ready, false);
6377 
6378 	__this_cpu_write(kvm_running_vcpu, vcpu);
6379 	kvm_arch_vcpu_load(vcpu, cpu);
6380 
6381 	WRITE_ONCE(vcpu->scheduled_out, false);
6382 }
6383 
6384 static void kvm_sched_out(struct preempt_notifier *pn,
6385 			  struct task_struct *next)
6386 {
6387 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6388 
6389 	WRITE_ONCE(vcpu->scheduled_out, true);
6390 
6391 	if (task_is_runnable(current) && vcpu->wants_to_run) {
6392 		WRITE_ONCE(vcpu->preempted, true);
6393 		WRITE_ONCE(vcpu->ready, true);
6394 	}
6395 	kvm_arch_vcpu_put(vcpu);
6396 	__this_cpu_write(kvm_running_vcpu, NULL);
6397 }
6398 
6399 /**
6400  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6401  *
6402  * We can disable preemption locally around accessing the per-CPU variable,
6403  * and use the resolved vcpu pointer after enabling preemption again,
6404  * because even if the current thread is migrated to another CPU, reading
6405  * the per-CPU value later will give us the same value, since we update the
6406  * per-CPU variable in the preempt notifier handlers.
6407  */
6408 struct kvm_vcpu *kvm_get_running_vcpu(void)
6409 {
6410 	struct kvm_vcpu *vcpu;
6411 
6412 	preempt_disable();
6413 	vcpu = __this_cpu_read(kvm_running_vcpu);
6414 	preempt_enable();
6415 
6416 	return vcpu;
6417 }
6418 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_running_vcpu);
6419 
6420 /**
6421  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6422  */
6423 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6424 {
6425 	return &kvm_running_vcpu;
6426 }
6427 
6428 #ifdef CONFIG_GUEST_PERF_EVENTS
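/*
 * perf guest-info callbacks, used to attribute PMU samples that arrive while
 * a vCPU is running to guest kernel/user context and to retrieve the guest
 * instruction pointer.
 */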
6429 static unsigned int kvm_guest_state(void)
6430 {
6431 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6432 	unsigned int state;
6433 
6434 	if (!kvm_arch_pmi_in_guest(vcpu))
6435 		return 0;
6436 
6437 	state = PERF_GUEST_ACTIVE;
6438 	if (!kvm_arch_vcpu_in_kernel(vcpu))
6439 		state |= PERF_GUEST_USER;
6440 
6441 	return state;
6442 }
6443 
6444 static unsigned long kvm_guest_get_ip(void)
6445 {
6446 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6447 
6448 	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6449 	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6450 		return 0;
6451 
6452 	return kvm_arch_vcpu_get_ip(vcpu);
6453 }
6454 
6455 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6456 	.state			= kvm_guest_state,
6457 	.get_ip			= kvm_guest_get_ip,
6458 	.handle_intel_pt_intr	= NULL,
6459 };
6460 
6461 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
6462 {
6463 	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6464 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
6465 }
6466 void kvm_unregister_perf_callbacks(void)
6467 {
6468 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6469 }
6470 #endif
6471 
6472 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6473 {
6474 	int r;
6475 	int cpu;
6476 
6477 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
6478 	if (!vcpu_align)
6479 		vcpu_align = __alignof__(struct kvm_vcpu);
6480 	kvm_vcpu_cache =
6481 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6482 					   SLAB_ACCOUNT,
6483 					   offsetof(struct kvm_vcpu, arch),
6484 					   offsetofend(struct kvm_vcpu, stats_id)
6485 					   - offsetof(struct kvm_vcpu, arch),
6486 					   NULL);
6487 	if (!kvm_vcpu_cache)
6488 		return -ENOMEM;
6489 
6490 	for_each_possible_cpu(cpu) {
6491 		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6492 					    GFP_KERNEL, cpu_to_node(cpu))) {
6493 			r = -ENOMEM;
6494 			goto err_cpu_kick_mask;
6495 		}
6496 	}
6497 
6498 	r = kvm_irqfd_init();
6499 	if (r)
6500 		goto err_irqfd;
6501 
6502 	r = kvm_async_pf_init();
6503 	if (r)
6504 		goto err_async_pf;
6505 
6506 	kvm_chardev_ops.owner = module;
6507 	kvm_vm_fops.owner = module;
6508 	kvm_vcpu_fops.owner = module;
6509 	kvm_device_fops.owner = module;
6510 
6511 	kvm_preempt_ops.sched_in = kvm_sched_in;
6512 	kvm_preempt_ops.sched_out = kvm_sched_out;
6513 
6514 	kvm_init_debug();
6515 
6516 	r = kvm_vfio_ops_init();
6517 	if (WARN_ON_ONCE(r))
6518 		goto err_vfio;
6519 
6520 	kvm_gmem_init(module);
6521 
6522 	r = kvm_init_virtualization();
6523 	if (r)
6524 		goto err_virt;
6525 
6526 	/*
6527 	 * Registration _must_ be the very last thing done, as this exposes
6528 	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
6529 	 */
6530 	r = misc_register(&kvm_dev);
6531 	if (r) {
6532 		pr_err("kvm: misc device register failed\n");
6533 		goto err_register;
6534 	}
6535 
6536 	return 0;
6537 
6538 err_register:
6539 	kvm_uninit_virtualization();
6540 err_virt:
6541 	kvm_vfio_ops_exit();
6542 err_vfio:
6543 	kvm_async_pf_deinit();
6544 err_async_pf:
6545 	kvm_irqfd_exit();
6546 err_irqfd:
6547 err_cpu_kick_mask:
6548 	for_each_possible_cpu(cpu)
6549 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6550 	kmem_cache_destroy(kvm_vcpu_cache);
6551 	return r;
6552 }
6553 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init);
6554 
6555 void kvm_exit(void)
6556 {
6557 	int cpu;
6558 
6559 	/*
6560 	 * Note, unregistering /dev/kvm doesn't strictly need to come first;
6561 	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
6562 	 * to KVM while the module is being stopped.
6563 	 */
6564 	misc_deregister(&kvm_dev);
6565 
6566 	kvm_uninit_virtualization();
6567 
6568 	debugfs_remove_recursive(kvm_debugfs_dir);
6569 	for_each_possible_cpu(cpu)
6570 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6571 	kmem_cache_destroy(kvm_vcpu_cache);
6572 	kvm_vfio_ops_exit();
6573 	kvm_async_pf_deinit();
6574 	kvm_irqfd_exit();
6575 }
6576 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_exit);
6577