xref: /linux/virt/kvm/kvm_main.c (revision 6b8a024d25ebf7535eb4a3e926309aa693cfe1bd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine (KVM) Hypervisor
4  *
5  * Copyright (C) 2006 Qumranet, Inc.
6  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
7  *
8  * Authors:
9  *   Avi Kivity   <avi@qumranet.com>
10  *   Yaniv Kamay  <yaniv@qumranet.com>
11  */
12 
13 #include <kvm/iodev.h>
14 
15 #include <linux/kvm_host.h>
16 #include <linux/kvm.h>
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/percpu.h>
20 #include <linux/mm.h>
21 #include <linux/miscdevice.h>
22 #include <linux/vmalloc.h>
23 #include <linux/reboot.h>
24 #include <linux/debugfs.h>
25 #include <linux/highmem.h>
26 #include <linux/file.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/cpu.h>
29 #include <linux/sched/signal.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/stat.h>
32 #include <linux/cpumask.h>
33 #include <linux/smp.h>
34 #include <linux/anon_inodes.h>
35 #include <linux/profile.h>
36 #include <linux/kvm_para.h>
37 #include <linux/pagemap.h>
38 #include <linux/mman.h>
39 #include <linux/swap.h>
40 #include <linux/bitops.h>
41 #include <linux/spinlock.h>
42 #include <linux/compat.h>
43 #include <linux/srcu.h>
44 #include <linux/hugetlb.h>
45 #include <linux/slab.h>
46 #include <linux/sort.h>
47 #include <linux/bsearch.h>
48 #include <linux/io.h>
49 #include <linux/lockdep.h>
50 #include <linux/kthread.h>
51 #include <linux/suspend.h>
52 
53 #include <asm/processor.h>
54 #include <asm/ioctl.h>
55 #include <linux/uaccess.h>
56 
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59 #include "kvm_mm.h"
60 #include "vfio.h"
61 
62 #include <trace/events/ipi.h>
63 
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/kvm.h>
66 
67 #include <linux/kvm_dirty_ring.h>
68 
69 
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72 
73 MODULE_AUTHOR("Qumranet");
74 MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
75 MODULE_LICENSE("GPL");
76 
77 /* Architectures should define their poll value according to the halt latency */
78 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
79 module_param(halt_poll_ns, uint, 0644);
80 EXPORT_SYMBOL_GPL(halt_poll_ns);
81 
82 /* The default grow factor, which doubles per-vcpu halt_poll_ns. */
83 unsigned int halt_poll_ns_grow = 2;
84 module_param(halt_poll_ns_grow, uint, 0644);
85 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
86 
87 /* The start value to grow halt_poll_ns from */
88 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
89 module_param(halt_poll_ns_grow_start, uint, 0644);
90 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
91 
92 /* The default shrink factor, which halves per-vcpu halt_poll_ns. */
93 unsigned int halt_poll_ns_shrink = 2;
94 module_param(halt_poll_ns_shrink, uint, 0644);
95 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
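/*
 * Illustrative note, not part of the upstream source: with the defaults above,
 * a vCPU's adaptive poll window starts at halt_poll_ns_grow_start (10us) and
 * is multiplied by halt_poll_ns_grow when KVM decides to poll longer, e.g.
 * 10us -> 20us -> 40us -> ..., up to the VM's maximum halt polling time, and
 * is divided by halt_poll_ns_shrink when polling stops paying off.
 */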
96 
97 /*
98  * Ordering of locks:
99  *
100  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
101  */
102 
103 DEFINE_MUTEX(kvm_lock);
104 LIST_HEAD(vm_list);
105 
106 static struct kmem_cache *kvm_vcpu_cache;
107 
108 static __read_mostly struct preempt_ops kvm_preempt_ops;
109 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
110 
111 static struct dentry *kvm_debugfs_dir;
112 
113 static const struct file_operations stat_fops_per_vm;
114 
115 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
116 			   unsigned long arg);
117 #ifdef CONFIG_KVM_COMPAT
118 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
119 				  unsigned long arg);
120 #define KVM_COMPAT(c)	.compat_ioctl	= (c)
121 #else
122 /*
123  * For architectures that don't implement a compat infrastructure,
124  * adopt a double line of defense:
125  * - Prevent a compat task from opening /dev/kvm
126  * - If the open has been done by a 64-bit task, and the KVM fd
127  *   passed to a compat task, let the ioctls fail.
128  */
129 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
130 				unsigned long arg) { return -EINVAL; }
131 
132 static int kvm_no_compat_open(struct inode *inode, struct file *file)
133 {
134 	return is_compat_task() ? -ENODEV : 0;
135 }
136 #define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
137 			.open		= kvm_no_compat_open
138 #endif
139 static int hardware_enable_all(void);
140 static void hardware_disable_all(void);
141 
142 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
143 
144 #define KVM_EVENT_CREATE_VM 0
145 #define KVM_EVENT_DESTROY_VM 1
146 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
147 static unsigned long long kvm_createvm_count;
148 static unsigned long long kvm_active_vms;
149 
150 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
151 
152 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
153 {
154 }
155 
156 bool kvm_is_zone_device_page(struct page *page)
157 {
158 	/*
159 	 * The metadata used by is_zone_device_page() to determine whether or
160 	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
161 	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
162 	 * page_count() is zero to help detect bad usage of this helper.
163 	 */
164 	if (WARN_ON_ONCE(!page_count(page)))
165 		return false;
166 
167 	return is_zone_device_page(page);
168 }
169 
170 /*
171  * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
172  * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
173  * is likely incomplete; it has been compiled purely through people wanting to
174  * back guests with a certain type of memory and encountering issues.
175  */
176 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
177 {
178 	struct page *page;
179 
180 	if (!pfn_valid(pfn))
181 		return NULL;
182 
183 	page = pfn_to_page(pfn);
184 	if (!PageReserved(page))
185 		return page;
186 
187 	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
188 	if (is_zero_pfn(pfn))
189 		return page;
190 
191 	/*
192 	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
193 	 * perspective they are "normal" pages, albeit with slightly different
194 	 * usage rules.
195 	 */
196 	if (kvm_is_zone_device_page(page))
197 		return page;
198 
199 	return NULL;
200 }
201 
202 /*
203  * Switches to specified vcpu, until a matching vcpu_put()
204  */
205 void vcpu_load(struct kvm_vcpu *vcpu)
206 {
207 	int cpu = get_cpu();
208 
209 	__this_cpu_write(kvm_running_vcpu, vcpu);
210 	preempt_notifier_register(&vcpu->preempt_notifier);
211 	kvm_arch_vcpu_load(vcpu, cpu);
212 	put_cpu();
213 }
214 EXPORT_SYMBOL_GPL(vcpu_load);
215 
216 void vcpu_put(struct kvm_vcpu *vcpu)
217 {
218 	preempt_disable();
219 	kvm_arch_vcpu_put(vcpu);
220 	preempt_notifier_unregister(&vcpu->preempt_notifier);
221 	__this_cpu_write(kvm_running_vcpu, NULL);
222 	preempt_enable();
223 }
224 EXPORT_SYMBOL_GPL(vcpu_put);
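/*
 * Illustrative sketch, not part of the upstream source: vcpu_load() and
 * vcpu_put() bracket any work that needs the vCPU's state loaded on the
 * current physical CPU, roughly as the vcpu ioctl paths do.  The helper name
 * in the body is a hypothetical placeholder:
 *
 *	vcpu_load(vcpu);
 *	r = do_work_with_loaded_vcpu(vcpu);
 *	vcpu_put(vcpu);
 */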
225 
226 /* TODO: merge with kvm_arch_vcpu_should_kick */
227 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
228 {
229 	int mode = kvm_vcpu_exiting_guest_mode(vcpu);
230 
231 	/*
232 	 * We need to wait for the VCPU to reenable interrupts and get out of
233 	 * READING_SHADOW_PAGE_TABLES mode.
234 	 */
235 	if (req & KVM_REQUEST_WAIT)
236 		return mode != OUTSIDE_GUEST_MODE;
237 
238 	/*
239 	 * Need to kick a running VCPU, but otherwise there is nothing to do.
240 	 */
241 	return mode == IN_GUEST_MODE;
242 }
243 
244 static void ack_kick(void *_completed)
245 {
246 }
247 
248 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
249 {
250 	if (cpumask_empty(cpus))
251 		return false;
252 
253 	smp_call_function_many(cpus, ack_kick, NULL, wait);
254 	return true;
255 }
256 
257 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
258 				  struct cpumask *tmp, int current_cpu)
259 {
260 	int cpu;
261 
262 	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
263 		__kvm_make_request(req, vcpu);
264 
265 	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
266 		return;
267 
268 	/*
269 	 * Note, the vCPU could get migrated to a different pCPU at any point
270 	 * after kvm_request_needs_ipi(), which could result in sending an IPI
271 	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
272 	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
273 	 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
274 	 * after this point is also OK, as the requirement is only that KVM wait
275 	 * for vCPUs that were reading SPTEs _before_ any changes were
276 	 * finalized. See kvm_vcpu_kick() for more details on handling requests.
277 	 */
278 	if (kvm_request_needs_ipi(vcpu, req)) {
279 		cpu = READ_ONCE(vcpu->cpu);
280 		if (cpu != -1 && cpu != current_cpu)
281 			__cpumask_set_cpu(cpu, tmp);
282 	}
283 }
284 
285 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
286 				 unsigned long *vcpu_bitmap)
287 {
288 	struct kvm_vcpu *vcpu;
289 	struct cpumask *cpus;
290 	int i, me;
291 	bool called;
292 
293 	me = get_cpu();
294 
295 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
296 	cpumask_clear(cpus);
297 
298 	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
299 		vcpu = kvm_get_vcpu(kvm, i);
300 		if (!vcpu)
301 			continue;
302 		kvm_make_vcpu_request(vcpu, req, cpus, me);
303 	}
304 
305 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
306 	put_cpu();
307 
308 	return called;
309 }
310 
311 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
312 {
313 	struct kvm_vcpu *vcpu;
314 	struct cpumask *cpus;
315 	unsigned long i;
316 	bool called;
317 	int me;
318 
319 	me = get_cpu();
320 
321 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
322 	cpumask_clear(cpus);
323 
324 	kvm_for_each_vcpu(i, vcpu, kvm)
325 		kvm_make_vcpu_request(vcpu, req, cpus, me);
326 
327 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
328 	put_cpu();
329 
330 	return called;
331 }
332 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
333 
334 void kvm_flush_remote_tlbs(struct kvm *kvm)
335 {
336 	++kvm->stat.generic.remote_tlb_flush_requests;
337 
338 	/*
339 	 * We want to publish modifications to the page tables before reading
340 	 * mode. Pairs with a memory barrier in arch-specific code.
341 	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
342 	 * and smp_mb in walk_shadow_page_lockless_begin/end.
343 	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
344 	 *
345 	 * There is already an smp_mb__after_atomic() before
346 	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
347 	 * barrier here.
348 	 */
349 	if (!kvm_arch_flush_remote_tlbs(kvm)
350 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
351 		++kvm->stat.generic.remote_tlb_flush;
352 }
353 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
354 
355 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
356 {
357 	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
358 		return;
359 
360 	/*
361 	 * Fall back to flushing the entire TLB if range-based TLB invalidation
362 	 * is unsupported by the architecture or can't be performed for whatever
363 	 * reason.
364 	 */
365 	kvm_flush_remote_tlbs(kvm);
366 }
367 
368 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
369 				   const struct kvm_memory_slot *memslot)
370 {
371 	/*
372 	 * All current use cases for flushing the TLBs for a specific memslot
373 	 * are related to dirty logging, and many do the TLB flush out of
374 	 * mmu_lock. The interactions between the various operations on a memslot
375 	 * must be serialized by slots_lock to ensure the TLB flush from one
376 	 * operation is observed by any other operation on the same memslot.
377 	 */
378 	lockdep_assert_held(&kvm->slots_lock);
379 	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
380 }
381 
382 static void kvm_flush_shadow_all(struct kvm *kvm)
383 {
384 	kvm_arch_flush_shadow_all(kvm);
385 	kvm_arch_guest_memory_reclaimed(kvm);
386 }
387 
388 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
389 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
390 					       gfp_t gfp_flags)
391 {
392 	void *page;
393 
394 	gfp_flags |= mc->gfp_zero;
395 
396 	if (mc->kmem_cache)
397 		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
398 
399 	page = (void *)__get_free_page(gfp_flags);
400 	if (page && mc->init_value)
401 		memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64));
402 	return page;
403 }
404 
405 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
406 {
407 	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
408 	void *obj;
409 
410 	if (mc->nobjs >= min)
411 		return 0;
412 
413 	if (unlikely(!mc->objects)) {
414 		if (WARN_ON_ONCE(!capacity))
415 			return -EIO;
416 
417 		/*
418 		 * Custom init values can be used only for page allocations,
419 		 * and obviously conflict with __GFP_ZERO.
420 		 */
421 		if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
422 			return -EIO;
423 
424 		mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp);
425 		if (!mc->objects)
426 			return -ENOMEM;
427 
428 		mc->capacity = capacity;
429 	}
430 
431 	/* It is illegal to request a different capacity across topups. */
432 	if (WARN_ON_ONCE(mc->capacity != capacity))
433 		return -EIO;
434 
435 	while (mc->nobjs < mc->capacity) {
436 		obj = mmu_memory_cache_alloc_obj(mc, gfp);
437 		if (!obj)
438 			return mc->nobjs >= min ? 0 : -ENOMEM;
439 		mc->objects[mc->nobjs++] = obj;
440 	}
441 	return 0;
442 }
443 
444 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
445 {
446 	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
447 }
448 
449 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
450 {
451 	return mc->nobjs;
452 }
453 
454 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
455 {
456 	while (mc->nobjs) {
457 		if (mc->kmem_cache)
458 			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
459 		else
460 			free_page((unsigned long)mc->objects[--mc->nobjs]);
461 	}
462 
463 	kvfree(mc->objects);
464 
465 	mc->objects = NULL;
466 	mc->capacity = 0;
467 }
468 
469 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
470 {
471 	void *p;
472 
473 	if (WARN_ON(!mc->nobjs))
474 		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
475 	else
476 		p = mc->objects[--mc->nobjs];
477 	BUG_ON(!p);
478 	return p;
479 }
480 #endif
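/*
 * Illustrative sketch, not part of the upstream source: the memory cache
 * helpers above are built for a two-phase pattern in arch MMU code: top up
 * the cache in a sleepable context, then allocate from it under mmu_lock
 * where sleeping is forbidden.  The cache field, count, and install helper
 * below are hypothetical placeholders:
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.page_cache, min_objects);
 *	if (r)
 *		return r;
 *	write_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.page_cache);
 *	install_obj(obj);
 *	write_unlock(&kvm->mmu_lock);
 */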
481 
482 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
483 {
484 	mutex_init(&vcpu->mutex);
485 	vcpu->cpu = -1;
486 	vcpu->kvm = kvm;
487 	vcpu->vcpu_id = id;
488 	vcpu->pid = NULL;
489 #ifndef __KVM_HAVE_ARCH_WQP
490 	rcuwait_init(&vcpu->wait);
491 #endif
492 	kvm_async_pf_vcpu_init(vcpu);
493 
494 	kvm_vcpu_set_in_spin_loop(vcpu, false);
495 	kvm_vcpu_set_dy_eligible(vcpu, false);
496 	vcpu->preempted = false;
497 	vcpu->ready = false;
498 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
499 	vcpu->last_used_slot = NULL;
500 
501 	/* Fill the stats id string for the vcpu */
502 	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
503 		 task_pid_nr(current), id);
504 }
505 
506 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
507 {
508 	kvm_arch_vcpu_destroy(vcpu);
509 	kvm_dirty_ring_free(&vcpu->dirty_ring);
510 
511 	/*
512 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
513 	 * the vcpu->pid pointer, and at destruction time all file descriptors
514 	 * are already gone.
515 	 */
516 	put_pid(rcu_dereference_protected(vcpu->pid, 1));
517 
518 	free_page((unsigned long)vcpu->run);
519 	kmem_cache_free(kvm_vcpu_cache, vcpu);
520 }
521 
522 void kvm_destroy_vcpus(struct kvm *kvm)
523 {
524 	unsigned long i;
525 	struct kvm_vcpu *vcpu;
526 
527 	kvm_for_each_vcpu(i, vcpu, kvm) {
528 		kvm_vcpu_destroy(vcpu);
529 		xa_erase(&kvm->vcpu_array, i);
530 	}
531 
532 	atomic_set(&kvm->online_vcpus, 0);
533 }
534 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
535 
536 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
537 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
538 {
539 	return container_of(mn, struct kvm, mmu_notifier);
540 }
541 
542 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
543 
544 typedef void (*on_lock_fn_t)(struct kvm *kvm);
545 
546 struct kvm_mmu_notifier_range {
547 	/*
548 	 * 64-bit addresses, as KVM notifiers can operate on host virtual
549 	 * addresses (unsigned long) and guest physical addresses (64-bit).
550 	 */
551 	u64 start;
552 	u64 end;
553 	union kvm_mmu_notifier_arg arg;
554 	gfn_handler_t handler;
555 	on_lock_fn_t on_lock;
556 	bool flush_on_ret;
557 	bool may_block;
558 };
559 
560 /*
561  * The inner-most helper returns a tuple containing the return value from the
562  * arch- and action-specific handler, plus a flag indicating whether or not at
563  * least one memslot was found, i.e. if the handler found guest memory.
564  *
565  * Note, most notifiers are averse to booleans, so even though KVM tracks the
566  * return from arch code as a bool, outer helpers will cast it to an int. :-(
567  */
568 typedef struct kvm_mmu_notifier_return {
569 	bool ret;
570 	bool found_memslot;
571 } kvm_mn_ret_t;
572 
573 /*
574  * Use a dedicated stub instead of NULL to indicate that there is no callback
575  * function/handler.  The compiler technically can't guarantee that a real
576  * function will have a non-zero address, and so it will generate code to
577  * check for !NULL, whereas comparing against a stub will be elided at compile
578  * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
579  */
580 static void kvm_null_fn(void)
581 {
582 
583 }
584 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
585 
586 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
587 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
588 	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
589 	     node;							     \
590 	     node = interval_tree_iter_next(node, start, last))	     \
591 
592 static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
593 							   const struct kvm_mmu_notifier_range *range)
594 {
595 	struct kvm_mmu_notifier_return r = {
596 		.ret = false,
597 		.found_memslot = false,
598 	};
599 	struct kvm_gfn_range gfn_range;
600 	struct kvm_memory_slot *slot;
601 	struct kvm_memslots *slots;
602 	int i, idx;
603 
604 	if (WARN_ON_ONCE(range->end <= range->start))
605 		return r;
606 
607 	/* A null handler is allowed if and only if on_lock() is provided. */
608 	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
609 			 IS_KVM_NULL_FN(range->handler)))
610 		return r;
611 
612 	idx = srcu_read_lock(&kvm->srcu);
613 
614 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
615 		struct interval_tree_node *node;
616 
617 		slots = __kvm_memslots(kvm, i);
618 		kvm_for_each_memslot_in_hva_range(node, slots,
619 						  range->start, range->end - 1) {
620 			unsigned long hva_start, hva_end;
621 
622 			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
623 			hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
624 			hva_end = min_t(unsigned long, range->end,
625 					slot->userspace_addr + (slot->npages << PAGE_SHIFT));
626 
627 			/*
628 			 * To optimize for the likely case where the address
629 			 * range is covered by zero or one memslots, don't
630 			 * bother making these conditional (to avoid writes on
631 			 * the second or later invocation of the handler).
632 			 */
633 			gfn_range.arg = range->arg;
634 			gfn_range.may_block = range->may_block;
635 
636 			/*
637 			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
638 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
639 			 */
640 			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
641 			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
642 			gfn_range.slot = slot;
643 
644 			if (!r.found_memslot) {
645 				r.found_memslot = true;
646 				KVM_MMU_LOCK(kvm);
647 				if (!IS_KVM_NULL_FN(range->on_lock))
648 					range->on_lock(kvm);
649 
650 				if (IS_KVM_NULL_FN(range->handler))
651 					goto mmu_unlock;
652 			}
653 			r.ret |= range->handler(kvm, &gfn_range);
654 		}
655 	}
656 
657 	if (range->flush_on_ret && r.ret)
658 		kvm_flush_remote_tlbs(kvm);
659 
660 mmu_unlock:
661 	if (r.found_memslot)
662 		KVM_MMU_UNLOCK(kvm);
663 
664 	srcu_read_unlock(&kvm->srcu, idx);
665 
666 	return r;
667 }
668 
669 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
670 						unsigned long start,
671 						unsigned long end,
672 						gfn_handler_t handler)
673 {
674 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
675 	const struct kvm_mmu_notifier_range range = {
676 		.start		= start,
677 		.end		= end,
678 		.handler	= handler,
679 		.on_lock	= (void *)kvm_null_fn,
680 		.flush_on_ret	= true,
681 		.may_block	= false,
682 	};
683 
684 	return __kvm_handle_hva_range(kvm, &range).ret;
685 }
686 
687 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
688 							 unsigned long start,
689 							 unsigned long end,
690 							 gfn_handler_t handler)
691 {
692 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
693 	const struct kvm_mmu_notifier_range range = {
694 		.start		= start,
695 		.end		= end,
696 		.handler	= handler,
697 		.on_lock	= (void *)kvm_null_fn,
698 		.flush_on_ret	= false,
699 		.may_block	= false,
700 	};
701 
702 	return __kvm_handle_hva_range(kvm, &range).ret;
703 }
704 
705 void kvm_mmu_invalidate_begin(struct kvm *kvm)
706 {
707 	lockdep_assert_held_write(&kvm->mmu_lock);
708 	/*
709 	 * The count increase must become visible at unlock time as no
710 	 * SPTE can be established without taking the mmu_lock, and the
711 	 * count is also read inside the mmu_lock critical section.
712 	 */
713 	kvm->mmu_invalidate_in_progress++;
714 
715 	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
716 		kvm->mmu_invalidate_range_start = INVALID_GPA;
717 		kvm->mmu_invalidate_range_end = INVALID_GPA;
718 	}
719 }
720 
721 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
722 {
723 	lockdep_assert_held_write(&kvm->mmu_lock);
724 
725 	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
726 
727 	if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
728 		kvm->mmu_invalidate_range_start = start;
729 		kvm->mmu_invalidate_range_end = end;
730 	} else {
731 		/*
732 		 * Fully tracking multiple concurrent ranges has diminishing
733 		 * returns. Keep things simple and just find the minimal range
734 		 * which includes the current and new ranges. As there won't be
735 		 * enough information to subtract a range after its invalidate
736 		 * completes, any ranges invalidated concurrently will
737 		 * accumulate and persist until all outstanding invalidates
738 		 * complete.
739 		 */
740 		kvm->mmu_invalidate_range_start =
741 			min(kvm->mmu_invalidate_range_start, start);
742 		kvm->mmu_invalidate_range_end =
743 			max(kvm->mmu_invalidate_range_end, end);
744 	}
745 }
746 
747 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
748 {
749 	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
750 	return kvm_unmap_gfn_range(kvm, range);
751 }
752 
753 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
754 					const struct mmu_notifier_range *range)
755 {
756 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
757 	const struct kvm_mmu_notifier_range hva_range = {
758 		.start		= range->start,
759 		.end		= range->end,
760 		.handler	= kvm_mmu_unmap_gfn_range,
761 		.on_lock	= kvm_mmu_invalidate_begin,
762 		.flush_on_ret	= true,
763 		.may_block	= mmu_notifier_range_blockable(range),
764 	};
765 
766 	trace_kvm_unmap_hva_range(range->start, range->end);
767 
768 	/*
769 	 * Prevent memslot modification between range_start() and range_end()
770 	 * so that conditional locking provides the same result in both
771 	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
772 	 * adjustments will be imbalanced.
773 	 *
774 	 * Pairs with the decrement in range_end().
775 	 */
776 	spin_lock(&kvm->mn_invalidate_lock);
777 	kvm->mn_active_invalidate_count++;
778 	spin_unlock(&kvm->mn_invalidate_lock);
779 
780 	/*
781 	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
782 	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
783 	 * each cache's lock.  There are relatively few caches in existence at
784 	 * any given time, and the caches themselves can check for hva overlap,
785 	 * i.e. don't need to rely on memslot overlap checks for performance.
786 	 * Because this runs without holding mmu_lock, the pfn caches must use
787 	 * mn_active_invalidate_count (see above) instead of
788 	 * mmu_invalidate_in_progress.
789 	 */
790 	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
791 
792 	/*
793 	 * If one or more memslots were found and thus zapped, notify arch code
794 	 * that guest memory has been reclaimed.  This needs to be done *after*
795 	 * dropping mmu_lock, as x86's reclaim path is slooooow.
796 	 */
797 	if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
798 		kvm_arch_guest_memory_reclaimed(kvm);
799 
800 	return 0;
801 }
802 
803 void kvm_mmu_invalidate_end(struct kvm *kvm)
804 {
805 	lockdep_assert_held_write(&kvm->mmu_lock);
806 
807 	/*
808 	 * This sequence increase notifies the KVM page fault handler that
809 	 * the page that is about to be mapped in the SPTE could have
810 	 * been freed.
811 	 */
812 	kvm->mmu_invalidate_seq++;
813 	smp_wmb();
814 	/*
815 	 * The above sequence increase must be visible before the
816 	 * below count decrease, which is ensured by the smp_wmb above
817 	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
818 	 */
819 	kvm->mmu_invalidate_in_progress--;
820 	KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
821 
822 	/*
823 	 * Assert that at least one range was added between start() and end().
824 	 * Not adding a range isn't fatal, but it is a KVM bug.
825 	 */
826 	WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
827 }
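/*
 * Illustrative sketch, not part of the upstream source: the in-progress count
 * and sequence number maintained by the begin()/end() helpers above are
 * consumed by page fault handlers via a snapshot-and-retry pattern along
 * these lines (mmu_invalidate_retry() is the real helper in kvm_host.h; the
 * lookup and install steps are hypothetical placeholders):
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = lookup_pfn_outside_mmu_lock(...);
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		goto retry;
 *	install_spte(...);
 *	write_unlock(&kvm->mmu_lock);
 */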
828 
829 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
830 					const struct mmu_notifier_range *range)
831 {
832 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
833 	const struct kvm_mmu_notifier_range hva_range = {
834 		.start		= range->start,
835 		.end		= range->end,
836 		.handler	= (void *)kvm_null_fn,
837 		.on_lock	= kvm_mmu_invalidate_end,
838 		.flush_on_ret	= false,
839 		.may_block	= mmu_notifier_range_blockable(range),
840 	};
841 	bool wake;
842 
843 	__kvm_handle_hva_range(kvm, &hva_range);
844 
845 	/* Pairs with the increment in range_start(). */
846 	spin_lock(&kvm->mn_invalidate_lock);
847 	if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
848 		--kvm->mn_active_invalidate_count;
849 	wake = !kvm->mn_active_invalidate_count;
850 	spin_unlock(&kvm->mn_invalidate_lock);
851 
852 	/*
853 	 * There can only be one waiter, since the wait happens under
854 	 * slots_lock.
855 	 */
856 	if (wake)
857 		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
858 }
859 
860 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
861 					      struct mm_struct *mm,
862 					      unsigned long start,
863 					      unsigned long end)
864 {
865 	trace_kvm_age_hva(start, end);
866 
867 	return kvm_handle_hva_range(mn, start, end, kvm_age_gfn);
868 }
869 
870 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
871 					struct mm_struct *mm,
872 					unsigned long start,
873 					unsigned long end)
874 {
875 	trace_kvm_age_hva(start, end);
876 
877 	/*
878 	 * Even though we do not flush the TLB, this will still adversely
879 	 * affect performance on pre-Haswell Intel EPT, where there is
880 	 * no EPT Access Bit to clear, so we have to tear down EPT
881 	 * tables instead. If we find this unacceptable, we can always
882 	 * add a parameter to kvm_age_hva so that it effectively doesn't
883 	 * do anything on clear_young.
884 	 *
885 	 * Also note that currently we never issue secondary TLB flushes
886 	 * from clear_young, leaving this job up to the regular system
887 	 * cadence. If we find this inaccurate, we might come up with a
888 	 * more sophisticated heuristic later.
889 	 */
890 	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
891 }
892 
893 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
894 				       struct mm_struct *mm,
895 				       unsigned long address)
896 {
897 	trace_kvm_test_age_hva(address);
898 
899 	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
900 					     kvm_test_age_gfn);
901 }
902 
903 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
904 				     struct mm_struct *mm)
905 {
906 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
907 	int idx;
908 
909 	idx = srcu_read_lock(&kvm->srcu);
910 	kvm_flush_shadow_all(kvm);
911 	srcu_read_unlock(&kvm->srcu, idx);
912 }
913 
914 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
915 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
916 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
917 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
918 	.clear_young		= kvm_mmu_notifier_clear_young,
919 	.test_young		= kvm_mmu_notifier_test_young,
920 	.release		= kvm_mmu_notifier_release,
921 };
922 
923 static int kvm_init_mmu_notifier(struct kvm *kvm)
924 {
925 	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
926 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
927 }
928 
929 #else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
930 
931 static int kvm_init_mmu_notifier(struct kvm *kvm)
932 {
933 	return 0;
934 }
935 
936 #endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
937 
938 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
939 static int kvm_pm_notifier_call(struct notifier_block *bl,
940 				unsigned long state,
941 				void *unused)
942 {
943 	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
944 
945 	return kvm_arch_pm_notifier(kvm, state);
946 }
947 
948 static void kvm_init_pm_notifier(struct kvm *kvm)
949 {
950 	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
951 	/* Suspend KVM before we suspend ftrace, RCU, etc. */
952 	kvm->pm_notifier.priority = INT_MAX;
953 	register_pm_notifier(&kvm->pm_notifier);
954 }
955 
956 static void kvm_destroy_pm_notifier(struct kvm *kvm)
957 {
958 	unregister_pm_notifier(&kvm->pm_notifier);
959 }
960 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
961 static void kvm_init_pm_notifier(struct kvm *kvm)
962 {
963 }
964 
965 static void kvm_destroy_pm_notifier(struct kvm *kvm)
966 {
967 }
968 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
969 
970 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
971 {
972 	if (!memslot->dirty_bitmap)
973 		return;
974 
975 	vfree(memslot->dirty_bitmap);
976 	memslot->dirty_bitmap = NULL;
977 }
978 
979 /* This does not remove the slot from struct kvm_memslots data structures */
980 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
981 {
982 	if (slot->flags & KVM_MEM_GUEST_MEMFD)
983 		kvm_gmem_unbind(slot);
984 
985 	kvm_destroy_dirty_bitmap(slot);
986 
987 	kvm_arch_free_memslot(kvm, slot);
988 
989 	kfree(slot);
990 }
991 
992 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
993 {
994 	struct hlist_node *idnode;
995 	struct kvm_memory_slot *memslot;
996 	int bkt;
997 
998 	/*
999 	 * The same memslot objects live in both the active and inactive sets;
1000 	 * arbitrarily free using index '1' so that the second invocation of this
1001 	 * function isn't operating over a structure with dangling pointers
1002 	 * (even though this function isn't actually touching them).
1003 	 */
1004 	if (!slots->node_idx)
1005 		return;
1006 
1007 	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1008 		kvm_free_memslot(kvm, memslot);
1009 }
1010 
1011 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
1012 {
1013 	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
1014 	case KVM_STATS_TYPE_INSTANT:
1015 		return 0444;
1016 	case KVM_STATS_TYPE_CUMULATIVE:
1017 	case KVM_STATS_TYPE_PEAK:
1018 	default:
1019 		return 0644;
1020 	}
1021 }
1022 
1023 
1024 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1025 {
1026 	int i;
1027 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1028 				      kvm_vcpu_stats_header.num_desc;
1029 
1030 	if (IS_ERR(kvm->debugfs_dentry))
1031 		return;
1032 
1033 	debugfs_remove_recursive(kvm->debugfs_dentry);
1034 
1035 	if (kvm->debugfs_stat_data) {
1036 		for (i = 0; i < kvm_debugfs_num_entries; i++)
1037 			kfree(kvm->debugfs_stat_data[i]);
1038 		kfree(kvm->debugfs_stat_data);
1039 	}
1040 }
1041 
1042 static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1043 {
1044 	static DEFINE_MUTEX(kvm_debugfs_lock);
1045 	struct dentry *dent;
1046 	char dir_name[ITOA_MAX_LEN * 2];
1047 	struct kvm_stat_data *stat_data;
1048 	const struct _kvm_stats_desc *pdesc;
1049 	int i, ret = -ENOMEM;
1050 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1051 				      kvm_vcpu_stats_header.num_desc;
1052 
1053 	if (!debugfs_initialized())
1054 		return 0;
1055 
1056 	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1057 	mutex_lock(&kvm_debugfs_lock);
1058 	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1059 	if (dent) {
1060 		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1061 		dput(dent);
1062 		mutex_unlock(&kvm_debugfs_lock);
1063 		return 0;
1064 	}
1065 	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1066 	mutex_unlock(&kvm_debugfs_lock);
1067 	if (IS_ERR(dent))
1068 		return 0;
1069 
1070 	kvm->debugfs_dentry = dent;
1071 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1072 					 sizeof(*kvm->debugfs_stat_data),
1073 					 GFP_KERNEL_ACCOUNT);
1074 	if (!kvm->debugfs_stat_data)
1075 		goto out_err;
1076 
1077 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1078 		pdesc = &kvm_vm_stats_desc[i];
1079 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1080 		if (!stat_data)
1081 			goto out_err;
1082 
1083 		stat_data->kvm = kvm;
1084 		stat_data->desc = pdesc;
1085 		stat_data->kind = KVM_STAT_VM;
1086 		kvm->debugfs_stat_data[i] = stat_data;
1087 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1088 				    kvm->debugfs_dentry, stat_data,
1089 				    &stat_fops_per_vm);
1090 	}
1091 
1092 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1093 		pdesc = &kvm_vcpu_stats_desc[i];
1094 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1095 		if (!stat_data)
1096 			goto out_err;
1097 
1098 		stat_data->kvm = kvm;
1099 		stat_data->desc = pdesc;
1100 		stat_data->kind = KVM_STAT_VCPU;
1101 		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1102 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1103 				    kvm->debugfs_dentry, stat_data,
1104 				    &stat_fops_per_vm);
1105 	}
1106 
1107 	kvm_arch_create_vm_debugfs(kvm);
1108 	return 0;
1109 out_err:
1110 	kvm_destroy_vm_debugfs(kvm);
1111 	return ret;
1112 }
1113 
1114 /*
1115  * Called after the VM is otherwise initialized, but just before adding it to
1116  * the vm_list.
1117  */
1118 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1119 {
1120 	return 0;
1121 }
1122 
1123 /*
1124  * Called just after removing the VM from the vm_list, but before doing any
1125  * other destruction.
1126  */
1127 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1128 {
1129 }
1130 
1131 /*
1132  * Called after the per-VM debugfs directory is created.  At that point
1133  * kvm->debugfs_dentry is already set up, so arch-specific debugfs entries can
1134  * be created under it.  Cleanup is done automatically and recursively in
1135  * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
1136  */
1137 void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1138 {
1139 }
1140 
1141 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1142 {
1143 	struct kvm *kvm = kvm_arch_alloc_vm();
1144 	struct kvm_memslots *slots;
1145 	int r, i, j;
1146 
1147 	if (!kvm)
1148 		return ERR_PTR(-ENOMEM);
1149 
1150 	KVM_MMU_LOCK_INIT(kvm);
1151 	mmgrab(current->mm);
1152 	kvm->mm = current->mm;
1153 	kvm_eventfd_init(kvm);
1154 	mutex_init(&kvm->lock);
1155 	mutex_init(&kvm->irq_lock);
1156 	mutex_init(&kvm->slots_lock);
1157 	mutex_init(&kvm->slots_arch_lock);
1158 	spin_lock_init(&kvm->mn_invalidate_lock);
1159 	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1160 	xa_init(&kvm->vcpu_array);
1161 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1162 	xa_init(&kvm->mem_attr_array);
1163 #endif
1164 
1165 	INIT_LIST_HEAD(&kvm->gpc_list);
1166 	spin_lock_init(&kvm->gpc_lock);
1167 
1168 	INIT_LIST_HEAD(&kvm->devices);
1169 	kvm->max_vcpus = KVM_MAX_VCPUS;
1170 
1171 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1172 
1173 	/*
1174 	 * Force subsequent debugfs file creations to fail if the VM directory
1175 	 * is not created (by kvm_create_vm_debugfs()).
1176 	 */
1177 	kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1178 
1179 	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
1180 		 task_pid_nr(current));
1181 
1182 	r = -ENOMEM;
1183 	if (init_srcu_struct(&kvm->srcu))
1184 		goto out_err_no_srcu;
1185 	if (init_srcu_struct(&kvm->irq_srcu))
1186 		goto out_err_no_irq_srcu;
1187 
1188 	r = kvm_init_irq_routing(kvm);
1189 	if (r)
1190 		goto out_err_no_irq_routing;
1191 
1192 	refcount_set(&kvm->users_count, 1);
1193 
1194 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1195 		for (j = 0; j < 2; j++) {
1196 			slots = &kvm->__memslots[i][j];
1197 
1198 			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1199 			slots->hva_tree = RB_ROOT_CACHED;
1200 			slots->gfn_tree = RB_ROOT;
1201 			hash_init(slots->id_hash);
1202 			slots->node_idx = j;
1203 
1204 			/* Generations must be different for each address space. */
1205 			slots->generation = i;
1206 		}
1207 
1208 		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1209 	}
1210 
1211 	r = -ENOMEM;
1212 	for (i = 0; i < KVM_NR_BUSES; i++) {
1213 		rcu_assign_pointer(kvm->buses[i],
1214 			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1215 		if (!kvm->buses[i])
1216 			goto out_err_no_arch_destroy_vm;
1217 	}
1218 
1219 	r = kvm_arch_init_vm(kvm, type);
1220 	if (r)
1221 		goto out_err_no_arch_destroy_vm;
1222 
1223 	r = hardware_enable_all();
1224 	if (r)
1225 		goto out_err_no_disable;
1226 
1227 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1228 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1229 #endif
1230 
1231 	r = kvm_init_mmu_notifier(kvm);
1232 	if (r)
1233 		goto out_err_no_mmu_notifier;
1234 
1235 	r = kvm_coalesced_mmio_init(kvm);
1236 	if (r < 0)
1237 		goto out_no_coalesced_mmio;
1238 
1239 	r = kvm_create_vm_debugfs(kvm, fdname);
1240 	if (r)
1241 		goto out_err_no_debugfs;
1242 
1243 	r = kvm_arch_post_init_vm(kvm);
1244 	if (r)
1245 		goto out_err;
1246 
1247 	mutex_lock(&kvm_lock);
1248 	list_add(&kvm->vm_list, &vm_list);
1249 	mutex_unlock(&kvm_lock);
1250 
1251 	preempt_notifier_inc();
1252 	kvm_init_pm_notifier(kvm);
1253 
1254 	return kvm;
1255 
1256 out_err:
1257 	kvm_destroy_vm_debugfs(kvm);
1258 out_err_no_debugfs:
1259 	kvm_coalesced_mmio_free(kvm);
1260 out_no_coalesced_mmio:
1261 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1262 	if (kvm->mmu_notifier.ops)
1263 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1264 #endif
1265 out_err_no_mmu_notifier:
1266 	hardware_disable_all();
1267 out_err_no_disable:
1268 	kvm_arch_destroy_vm(kvm);
1269 out_err_no_arch_destroy_vm:
1270 	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1271 	for (i = 0; i < KVM_NR_BUSES; i++)
1272 		kfree(kvm_get_bus(kvm, i));
1273 	kvm_free_irq_routing(kvm);
1274 out_err_no_irq_routing:
1275 	cleanup_srcu_struct(&kvm->irq_srcu);
1276 out_err_no_irq_srcu:
1277 	cleanup_srcu_struct(&kvm->srcu);
1278 out_err_no_srcu:
1279 	kvm_arch_free_vm(kvm);
1280 	mmdrop(current->mm);
1281 	return ERR_PTR(r);
1282 }
1283 
1284 static void kvm_destroy_devices(struct kvm *kvm)
1285 {
1286 	struct kvm_device *dev, *tmp;
1287 
1288 	/*
1289 	 * We do not need to take the kvm->lock here, because nobody else
1290 	 * has a reference to the struct kvm at this point and therefore
1291 	 * cannot access the devices list anyhow.
1292 	 *
1293 	 * The device list is generally managed as an rculist, but list_del()
1294 	 * is used intentionally here. If a bug in KVM introduced a reader that
1295 	 * was not backed by a reference on the kvm struct, the hope is that
1296 	 * it'd consume the poisoned forward pointer instead of suffering a
1297 	 * use-after-free, even though this cannot be guaranteed.
1298 	 */
1299 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1300 		list_del(&dev->vm_node);
1301 		dev->ops->destroy(dev);
1302 	}
1303 }
1304 
1305 static void kvm_destroy_vm(struct kvm *kvm)
1306 {
1307 	int i;
1308 	struct mm_struct *mm = kvm->mm;
1309 
1310 	kvm_destroy_pm_notifier(kvm);
1311 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1312 	kvm_destroy_vm_debugfs(kvm);
1313 	kvm_arch_sync_events(kvm);
1314 	mutex_lock(&kvm_lock);
1315 	list_del(&kvm->vm_list);
1316 	mutex_unlock(&kvm_lock);
1317 	kvm_arch_pre_destroy_vm(kvm);
1318 
1319 	kvm_free_irq_routing(kvm);
1320 	for (i = 0; i < KVM_NR_BUSES; i++) {
1321 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1322 
1323 		if (bus)
1324 			kvm_io_bus_destroy(bus);
1325 		kvm->buses[i] = NULL;
1326 	}
1327 	kvm_coalesced_mmio_free(kvm);
1328 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
1329 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1330 	/*
1331 	 * At this point, pending calls to invalidate_range_start()
1332 	 * have completed but no more MMU notifiers will run, so
1333 	 * mn_active_invalidate_count may remain unbalanced.
1334 	 * No threads can be waiting in kvm_swap_active_memslots() as the
1335 	 * last reference on KVM has been dropped, but freeing
1336 	 * memslots would deadlock without this manual intervention.
1337 	 *
1338 	 * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
1339 	 * notifier between a start() and end(), then there shouldn't be any
1340 	 * in-progress invalidations.
1341 	 */
1342 	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1343 	if (kvm->mn_active_invalidate_count)
1344 		kvm->mn_active_invalidate_count = 0;
1345 	else
1346 		WARN_ON(kvm->mmu_invalidate_in_progress);
1347 #else
1348 	kvm_flush_shadow_all(kvm);
1349 #endif
1350 	kvm_arch_destroy_vm(kvm);
1351 	kvm_destroy_devices(kvm);
1352 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1353 		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1354 		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1355 	}
1356 	cleanup_srcu_struct(&kvm->irq_srcu);
1357 	cleanup_srcu_struct(&kvm->srcu);
1358 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
1359 	xa_destroy(&kvm->mem_attr_array);
1360 #endif
1361 	kvm_arch_free_vm(kvm);
1362 	preempt_notifier_dec();
1363 	hardware_disable_all();
1364 	mmdrop(mm);
1365 }
1366 
1367 void kvm_get_kvm(struct kvm *kvm)
1368 {
1369 	refcount_inc(&kvm->users_count);
1370 }
1371 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1372 
1373 /*
1374  * A safe version of kvm_get_kvm() that makes sure the VM is not in the middle
1375  * of destruction.  Returns true if kvm was referenced successfully, false otherwise.
1376  */
1377 bool kvm_get_kvm_safe(struct kvm *kvm)
1378 {
1379 	return refcount_inc_not_zero(&kvm->users_count);
1380 }
1381 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
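/*
 * Illustrative sketch, not part of the upstream source: kvm_get_kvm_safe() is
 * meant for callers that hold only a weak reference (e.g. an entry on a
 * global list) and must not resurrect a VM whose user count has already hit
 * zero.  do_work() is a hypothetical placeholder:
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return;
 *	do_work(kvm);
 *	kvm_put_kvm(kvm);
 */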
1382 
1383 void kvm_put_kvm(struct kvm *kvm)
1384 {
1385 	if (refcount_dec_and_test(&kvm->users_count))
1386 		kvm_destroy_vm(kvm);
1387 }
1388 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1389 
1390 /*
1391  * Used to put a reference that was taken on behalf of an object associated
1392  * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1393  * of the new file descriptor fails and the reference cannot be transferred to
1394  * its final owner.  In such cases, the caller is still actively using @kvm and
1395  * will fail miserably if the refcount unexpectedly hits zero.
1396  */
1397 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1398 {
1399 	WARN_ON(refcount_dec_and_test(&kvm->users_count));
1400 }
1401 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1402 
1403 static int kvm_vm_release(struct inode *inode, struct file *filp)
1404 {
1405 	struct kvm *kvm = filp->private_data;
1406 
1407 	kvm_irqfd_release(kvm);
1408 
1409 	kvm_put_kvm(kvm);
1410 	return 0;
1411 }
1412 
1413 /*
1414  * Allocation size is twice as large as the actual dirty bitmap size.
1415  * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
1416  */
1417 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1418 {
1419 	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1420 
1421 	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1422 	if (!memslot->dirty_bitmap)
1423 		return -ENOMEM;
1424 
1425 	return 0;
1426 }
1427 
1428 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1429 {
1430 	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1431 	int node_idx_inactive = active->node_idx ^ 1;
1432 
1433 	return &kvm->__memslots[as_id][node_idx_inactive];
1434 }
1435 
1436 /*
1437  * Helper to get the address space ID when one of the memslot pointers may be NULL.
1438  * This also serves as a sanity check that at least one of the pointers is non-NULL,
1439  * and that their address space IDs don't diverge.
1440  */
1441 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1442 				  struct kvm_memory_slot *b)
1443 {
1444 	if (WARN_ON_ONCE(!a && !b))
1445 		return 0;
1446 
1447 	if (!a)
1448 		return b->as_id;
1449 	if (!b)
1450 		return a->as_id;
1451 
1452 	WARN_ON_ONCE(a->as_id != b->as_id);
1453 	return a->as_id;
1454 }
1455 
1456 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1457 				struct kvm_memory_slot *slot)
1458 {
1459 	struct rb_root *gfn_tree = &slots->gfn_tree;
1460 	struct rb_node **node, *parent;
1461 	int idx = slots->node_idx;
1462 
1463 	parent = NULL;
1464 	for (node = &gfn_tree->rb_node; *node; ) {
1465 		struct kvm_memory_slot *tmp;
1466 
1467 		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1468 		parent = *node;
1469 		if (slot->base_gfn < tmp->base_gfn)
1470 			node = &(*node)->rb_left;
1471 		else if (slot->base_gfn > tmp->base_gfn)
1472 			node = &(*node)->rb_right;
1473 		else
1474 			BUG();
1475 	}
1476 
1477 	rb_link_node(&slot->gfn_node[idx], parent, node);
1478 	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1479 }
1480 
1481 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1482 			       struct kvm_memory_slot *slot)
1483 {
1484 	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1485 }
1486 
1487 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1488 				 struct kvm_memory_slot *old,
1489 				 struct kvm_memory_slot *new)
1490 {
1491 	int idx = slots->node_idx;
1492 
1493 	WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1494 
1495 	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1496 			&slots->gfn_tree);
1497 }
1498 
1499 /*
1500  * Replace @old with @new in the inactive memslots.
1501  *
1502  * With NULL @old this simply adds @new.
1503  * With NULL @new this simply removes @old.
1504  *
1505  * If @new is non-NULL its hva_node[slots_idx] range has to be set
1506  * appropriately.
1507  */
1508 static void kvm_replace_memslot(struct kvm *kvm,
1509 				struct kvm_memory_slot *old,
1510 				struct kvm_memory_slot *new)
1511 {
1512 	int as_id = kvm_memslots_get_as_id(old, new);
1513 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1514 	int idx = slots->node_idx;
1515 
1516 	if (old) {
1517 		hash_del(&old->id_node[idx]);
1518 		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1519 
1520 		if ((long)old == atomic_long_read(&slots->last_used_slot))
1521 			atomic_long_set(&slots->last_used_slot, (long)new);
1522 
1523 		if (!new) {
1524 			kvm_erase_gfn_node(slots, old);
1525 			return;
1526 		}
1527 	}
1528 
1529 	/*
1530 	 * Initialize @new's hva range.  Do this even when replacing an @old
1531 	 * slot; kvm_copy_memslot() deliberately does not touch node data.
1532 	 */
1533 	new->hva_node[idx].start = new->userspace_addr;
1534 	new->hva_node[idx].last = new->userspace_addr +
1535 				  (new->npages << PAGE_SHIFT) - 1;
1536 
1537 	/*
1538 	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(), so
1539 	 * the hva_node needs to be swapped with remove+insert even though the hva can't
1540 	 * change when replacing an existing slot.
1541 	 */
1542 	hash_add(slots->id_hash, &new->id_node[idx], new->id);
1543 	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1544 
1545 	/*
1546 	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1547 	 * switch the node in the gfn tree instead of removing the old and
1548 	 * inserting the new as two separate operations. Replacement is a
1549 	 * single O(1) operation versus two O(log(n)) operations for
1550 	 * remove+insert.
1551 	 */
1552 	if (old && old->base_gfn == new->base_gfn) {
1553 		kvm_replace_gfn_node(slots, old, new);
1554 	} else {
1555 		if (old)
1556 			kvm_erase_gfn_node(slots, old);
1557 		kvm_insert_gfn_node(slots, new);
1558 	}
1559 }
1560 
1561 /*
1562  * Flags that do not access any of the extra space of struct
1563  * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
1564  * only allows these.
1565  */
1566 #define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
1567 	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
1568 
1569 static int check_memory_region_flags(struct kvm *kvm,
1570 				     const struct kvm_userspace_memory_region2 *mem)
1571 {
1572 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1573 
1574 	if (kvm_arch_has_private_mem(kvm))
1575 		valid_flags |= KVM_MEM_GUEST_MEMFD;
1576 
1577 	/* Dirty logging of private memory is not currently supported. */
1578 	if (mem->flags & KVM_MEM_GUEST_MEMFD)
1579 		valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1580 
1581 #ifdef CONFIG_HAVE_KVM_READONLY_MEM
1582 	/*
1583 	 * GUEST_MEMFD is incompatible with read-only memslots, as writes to
1584 	 * read-only memslots have emulated MMIO, not page fault, semantics,
1585 	 * and KVM doesn't allow emulated MMIO for private memory.
1586 	 */
1587 	if (!(mem->flags & KVM_MEM_GUEST_MEMFD))
1588 		valid_flags |= KVM_MEM_READONLY;
1589 #endif
1590 
1591 	if (mem->flags & ~valid_flags)
1592 		return -EINVAL;
1593 
1594 	return 0;
1595 }
1596 
1597 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1598 {
1599 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1600 
1601 	/* Grab the generation from the active memslots. */
1602 	u64 gen = __kvm_memslots(kvm, as_id)->generation;
1603 
1604 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1605 	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1606 
1607 	/*
1608 	 * Do not store the new memslots while there are invalidations in
1609 	 * progress, otherwise the locking in invalidate_range_start and
1610 	 * invalidate_range_end will be unbalanced.
1611 	 */
1612 	spin_lock(&kvm->mn_invalidate_lock);
1613 	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1614 	while (kvm->mn_active_invalidate_count) {
1615 		set_current_state(TASK_UNINTERRUPTIBLE);
1616 		spin_unlock(&kvm->mn_invalidate_lock);
1617 		schedule();
1618 		spin_lock(&kvm->mn_invalidate_lock);
1619 	}
1620 	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1621 	rcu_assign_pointer(kvm->memslots[as_id], slots);
1622 	spin_unlock(&kvm->mn_invalidate_lock);
1623 
1624 	/*
1625 	 * Acquired in kvm_set_memslot. Must be released before the SRCU
1626 	 * synchronization below in order to avoid deadlock with another thread
1627 	 * acquiring the slots_arch_lock in an srcu critical section.
1628 	 */
1629 	mutex_unlock(&kvm->slots_arch_lock);
1630 
1631 	synchronize_srcu_expedited(&kvm->srcu);
1632 
1633 	/*
1634 	 * Increment the new memslot generation a second time, dropping the
1635 	 * update in-progress flag and incrementing the generation based on
1636 	 * the number of address spaces.  This provides a unique and easily
1637 	 * identifiable generation number while the memslots are in flux.
1638 	 */
1639 	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1640 
1641 	/*
1642 	 * Generations must be unique even across address spaces.  We do not need
1643 	 * a global counter for that; instead, the generation space is evenly split
1644 	 * across address spaces.  For example, with two address spaces, address
1645 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1646 	 * use generations 1, 3, 5, ...
1647 	 */
1648 	gen += kvm_arch_nr_memslot_as_ids(kvm);
1649 
1650 	kvm_arch_memslots_updated(kvm, gen);
1651 
1652 	slots->generation = gen;
1653 }
1654 
1655 static int kvm_prepare_memory_region(struct kvm *kvm,
1656 				     const struct kvm_memory_slot *old,
1657 				     struct kvm_memory_slot *new,
1658 				     enum kvm_mr_change change)
1659 {
1660 	int r;
1661 
1662 	/*
1663 	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
1664 	 * will be freed on "commit".  If logging is enabled in both old and
1665 	 * new, reuse the existing bitmap.  If logging is enabled only in the
1666 	 * new and KVM isn't using a ring buffer, allocate and initialize a
1667 	 * new bitmap.
1668 	 */
1669 	if (change != KVM_MR_DELETE) {
1670 		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1671 			new->dirty_bitmap = NULL;
1672 		else if (old && old->dirty_bitmap)
1673 			new->dirty_bitmap = old->dirty_bitmap;
1674 		else if (kvm_use_dirty_bitmap(kvm)) {
1675 			r = kvm_alloc_dirty_bitmap(new);
1676 			if (r)
1677 				return r;
1678 
1679 			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1680 				bitmap_set(new->dirty_bitmap, 0, new->npages);
1681 		}
1682 	}
1683 
1684 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1685 
1686 	/* Free the bitmap on failure if it was allocated above. */
1687 	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1688 		kvm_destroy_dirty_bitmap(new);
1689 
1690 	return r;
1691 }
1692 
1693 static void kvm_commit_memory_region(struct kvm *kvm,
1694 				     struct kvm_memory_slot *old,
1695 				     const struct kvm_memory_slot *new,
1696 				     enum kvm_mr_change change)
1697 {
1698 	int old_flags = old ? old->flags : 0;
1699 	int new_flags = new ? new->flags : 0;
1700 	/*
1701 	 * Update the total number of memslot pages before calling the arch
1702 	 * hook so that architectures can consume the result directly.
1703 	 */
1704 	if (change == KVM_MR_DELETE)
1705 		kvm->nr_memslot_pages -= old->npages;
1706 	else if (change == KVM_MR_CREATE)
1707 		kvm->nr_memslot_pages += new->npages;
1708 
1709 	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1710 		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1711 		atomic_set(&kvm->nr_memslots_dirty_logging,
1712 			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1713 	}
1714 
1715 	kvm_arch_commit_memory_region(kvm, old, new, change);
1716 
1717 	switch (change) {
1718 	case KVM_MR_CREATE:
1719 		/* Nothing more to do. */
1720 		break;
1721 	case KVM_MR_DELETE:
1722 		/* Free the old memslot and all its metadata. */
1723 		kvm_free_memslot(kvm, old);
1724 		break;
1725 	case KVM_MR_MOVE:
1726 	case KVM_MR_FLAGS_ONLY:
1727 		/*
1728 		 * Free the dirty bitmap as needed; the below check encompasses
1729 		 * both the flags and whether a ring buffer is being used.
1730 		 */
1731 		if (old->dirty_bitmap && !new->dirty_bitmap)
1732 			kvm_destroy_dirty_bitmap(old);
1733 
1734 		/*
1735 		 * The final quirk.  Free the detached, old slot, but only its
1736 		 * memory, not any metadata.  Metadata, including arch specific
1737 		 * data, may be reused by @new.
1738 		 */
1739 		kfree(old);
1740 		break;
1741 	default:
1742 		BUG();
1743 	}
1744 }
1745 
1746 /*
1747  * Activate @new, which must be installed in the inactive slots by the caller,
1748  * by swapping the active slots and then propagating @new to @old once @old is
1749  * unreachable and can be safely modified.
1750  *
1751  * With NULL @old this simply adds @new to @active (while swapping the sets).
1752  * With NULL @new this simply removes @old from @active and frees it
1753  * (while also swapping the sets).
1754  */
1755 static void kvm_activate_memslot(struct kvm *kvm,
1756 				 struct kvm_memory_slot *old,
1757 				 struct kvm_memory_slot *new)
1758 {
1759 	int as_id = kvm_memslots_get_as_id(old, new);
1760 
1761 	kvm_swap_active_memslots(kvm, as_id);
1762 
1763 	/* Propagate the new memslot to the now inactive memslots. */
1764 	kvm_replace_memslot(kvm, old, new);
1765 }
1766 
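/*
 * Copy a memslot's payload fields.  The gfn/hva tree node linkage is
 * deliberately not copied so that @dest can be inserted into the memslot
 * sets independently of @src.
 */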
1767 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1768 			     const struct kvm_memory_slot *src)
1769 {
1770 	dest->base_gfn = src->base_gfn;
1771 	dest->npages = src->npages;
1772 	dest->dirty_bitmap = src->dirty_bitmap;
1773 	dest->arch = src->arch;
1774 	dest->userspace_addr = src->userspace_addr;
1775 	dest->flags = src->flags;
1776 	dest->id = src->id;
1777 	dest->as_id = src->as_id;
1778 }
1779 
1780 static void kvm_invalidate_memslot(struct kvm *kvm,
1781 				   struct kvm_memory_slot *old,
1782 				   struct kvm_memory_slot *invalid_slot)
1783 {
1784 	/*
1785 	 * Mark the current slot INVALID.  As with all memslot modifications,
1786 	 * this must be done on an unreachable slot to avoid modifying the
1787 	 * current slot in the active tree.
1788 	 */
1789 	kvm_copy_memslot(invalid_slot, old);
1790 	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1791 	kvm_replace_memslot(kvm, old, invalid_slot);
1792 
1793 	/*
1794 	 * Activate the slot that is now marked INVALID, but don't propagate
1795 	 * the slot to the now inactive slots. The slot is either going to be
1796 	 * deleted or recreated as a new slot.
1797 	 */
1798 	kvm_swap_active_memslots(kvm, old->as_id);
1799 
1800 	/*
1801 	 * From this point no new shadow pages pointing to a deleted, or moved,
1802 	 * memslot will be created.  Validation of sp->gfn happens in:
1803 	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1804 	 *	- kvm_is_visible_gfn (mmu_check_root)
1805 	 */
1806 	kvm_arch_flush_shadow_memslot(kvm, old);
1807 	kvm_arch_guest_memory_reclaimed(kvm);
1808 
1809 	/* Was released by kvm_swap_active_memslots(), reacquire. */
1810 	mutex_lock(&kvm->slots_arch_lock);
1811 
1812 	/*
1813 	 * Copy the arch-specific field of the newly-installed slot back to the
1814 	 * old slot as the arch data could have changed between releasing
1815 	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1816 	 * above.  Writers are required to retrieve memslots *after* acquiring
1817 	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1818 	 */
1819 	old->arch = invalid_slot->arch;
1820 }
1821 
1822 static void kvm_create_memslot(struct kvm *kvm,
1823 			       struct kvm_memory_slot *new)
1824 {
1825 	/* Add the new memslot to the inactive set and activate. */
1826 	kvm_replace_memslot(kvm, NULL, new);
1827 	kvm_activate_memslot(kvm, NULL, new);
1828 }
1829 
1830 static void kvm_delete_memslot(struct kvm *kvm,
1831 			       struct kvm_memory_slot *old,
1832 			       struct kvm_memory_slot *invalid_slot)
1833 {
1834 	/*
1835 	 * Remove the old memslot (in the inactive memslots) by passing NULL as
1836 	 * the "new" slot, and likewise for the invalid version in the active slots.
1837 	 */
1838 	kvm_replace_memslot(kvm, old, NULL);
1839 	kvm_activate_memslot(kvm, invalid_slot, NULL);
1840 }
1841 
1842 static void kvm_move_memslot(struct kvm *kvm,
1843 			     struct kvm_memory_slot *old,
1844 			     struct kvm_memory_slot *new,
1845 			     struct kvm_memory_slot *invalid_slot)
1846 {
1847 	/*
1848 	 * Replace the old memslot in the inactive slots, and then swap slots
1849 	 * and replace the current INVALID with the new as well.
1850 	 */
1851 	kvm_replace_memslot(kvm, old, new);
1852 	kvm_activate_memslot(kvm, invalid_slot, new);
1853 }
1854 
1855 static void kvm_update_flags_memslot(struct kvm *kvm,
1856 				     struct kvm_memory_slot *old,
1857 				     struct kvm_memory_slot *new)
1858 {
1859 	/*
1860 	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1861 	 * an intermediate step. Instead, the old memslot is simply replaced
1862 	 * with a new, updated copy in both memslot sets.
1863 	 */
1864 	kvm_replace_memslot(kvm, old, new);
1865 	kvm_activate_memslot(kvm, old, new);
1866 }
1867 
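/*
 * Install @new and/or retire @old in the memslot sets according to @change.
 * For DELETE and MOVE, the old slot is invalidated first so that vCPUs can't
 * fault in stale mappings while the update is in flight.
 */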
1868 static int kvm_set_memslot(struct kvm *kvm,
1869 			   struct kvm_memory_slot *old,
1870 			   struct kvm_memory_slot *new,
1871 			   enum kvm_mr_change change)
1872 {
1873 	struct kvm_memory_slot *invalid_slot;
1874 	int r;
1875 
1876 	/*
1877 	 * Released in kvm_swap_active_memslots().
1878 	 *
1879 	 * Must be held from before the current memslots are copied until after
1880 	 * the new memslots are installed with rcu_assign_pointer, then
1881 	 * released before the synchronize srcu in kvm_swap_active_memslots().
1882 	 *
1883 	 * When modifying memslots outside of the slots_lock, must be held
1884 	 * before reading the pointer to the current memslots until after all
1885 	 * changes to those memslots are complete.
1886 	 *
1887 	 * These rules ensure that installing new memslots does not lose
1888 	 * changes made to the previous memslots.
1889 	 */
1890 	mutex_lock(&kvm->slots_arch_lock);
1891 
1892 	/*
1893 	 * Invalidate the old slot if it's being deleted or moved.  This is
1894 	 * done prior to actually deleting/moving the memslot to allow vCPUs to
1895 	 * continue running by ensuring there are no mappings or shadow pages
1896 	 * for the memslot when it is deleted/moved.  Without pre-invalidation
1897 	 * (and without a lock), a window would exist between effecting the
1898 	 * delete/move and committing the changes in arch code where KVM or a
1899 	 * guest could access a non-existent memslot.
1900 	 *
1901 	 * Modifications are done on a temporary, unreachable slot.  The old
1902 	 * slot needs to be preserved in case a later step fails and the
1903 	 * invalidation needs to be reverted.
1904 	 */
1905 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1906 		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1907 		if (!invalid_slot) {
1908 			mutex_unlock(&kvm->slots_arch_lock);
1909 			return -ENOMEM;
1910 		}
1911 		kvm_invalidate_memslot(kvm, old, invalid_slot);
1912 	}
1913 
1914 	r = kvm_prepare_memory_region(kvm, old, new, change);
1915 	if (r) {
1916 		/*
1917 		 * For DELETE/MOVE, revert the above INVALID change.  No
1918 		 * modifications required since the original slot was preserved
1919 		 * in the inactive slots.  Changing the active memslots also
1920 		 * releases slots_arch_lock.
1921 		 */
1922 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1923 			kvm_activate_memslot(kvm, invalid_slot, old);
1924 			kfree(invalid_slot);
1925 		} else {
1926 			mutex_unlock(&kvm->slots_arch_lock);
1927 		}
1928 		return r;
1929 	}
1930 
1931 	/*
1932 	 * For DELETE and MOVE, the working slot is now active as the INVALID
1933 	 * version of the old slot.  MOVE is particularly special as it reuses
1934 	 * the old slot and returns a copy of the old slot (in working_slot).
1935 	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1936 	 * old slot is detached but otherwise preserved.
1937 	 */
1938 	if (change == KVM_MR_CREATE)
1939 		kvm_create_memslot(kvm, new);
1940 	else if (change == KVM_MR_DELETE)
1941 		kvm_delete_memslot(kvm, old, invalid_slot);
1942 	else if (change == KVM_MR_MOVE)
1943 		kvm_move_memslot(kvm, old, new, invalid_slot);
1944 	else if (change == KVM_MR_FLAGS_ONLY)
1945 		kvm_update_flags_memslot(kvm, old, new);
1946 	else
1947 		BUG();
1948 
1949 	/* Free the temporary INVALID slot used for DELETE and MOVE. */
1950 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1951 		kfree(invalid_slot);
1952 
1953 	/*
1954 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
1955 	 * will directly hit the final, active memslot.  Architectures are
1956 	 * responsible for knowing that new->arch may be stale.
1957 	 */
1958 	kvm_commit_memory_region(kvm, old, new, change);
1959 
1960 	return 0;
1961 }
1962 
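/*
 * Return true if any gfn in [@start, @end) is covered by a memslot other
 * than the one identified by @id, i.e. if the proposed range would overlap
 * an existing slot.
 */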
1963 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1964 				      gfn_t start, gfn_t end)
1965 {
1966 	struct kvm_memslot_iter iter;
1967 
1968 	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1969 		if (iter.slot->id != id)
1970 			return true;
1971 	}
1972 
1973 	return false;
1974 }
1975 
1976 /*
1977  * Allocate some memory and give it an address in the guest physical address
1978  * space.
1979  *
1980  * Discontiguous memory is allowed, mostly for framebuffers.
1981  *
1982  * Must be called holding kvm->slots_lock for write.
1983  */
1984 int __kvm_set_memory_region(struct kvm *kvm,
1985 			    const struct kvm_userspace_memory_region2 *mem)
1986 {
1987 	struct kvm_memory_slot *old, *new;
1988 	struct kvm_memslots *slots;
1989 	enum kvm_mr_change change;
1990 	unsigned long npages;
1991 	gfn_t base_gfn;
1992 	int as_id, id;
1993 	int r;
1994 
1995 	r = check_memory_region_flags(kvm, mem);
1996 	if (r)
1997 		return r;
1998 
1999 	as_id = mem->slot >> 16;
2000 	id = (u16)mem->slot;
2001 
2002 	/* General sanity checks */
2003 	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
2004 	    (mem->memory_size != (unsigned long)mem->memory_size))
2005 		return -EINVAL;
2006 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
2007 		return -EINVAL;
2008 	/* We can read the guest memory with __xxx_user() later on. */
2009 	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
2010 	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
2011 	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
2012 			mem->memory_size))
2013 		return -EINVAL;
2014 	if (mem->flags & KVM_MEM_GUEST_MEMFD &&
2015 	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
2016 	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
2017 		return -EINVAL;
2018 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
2019 		return -EINVAL;
2020 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
2021 		return -EINVAL;
2022 	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
2023 		return -EINVAL;
2024 
2025 	slots = __kvm_memslots(kvm, as_id);
2026 
2027 	/*
2028 	 * Note, the old memslot (and the pointer itself!) may be invalidated
2029 	 * and/or destroyed by kvm_set_memslot().
2030 	 */
2031 	old = id_to_memslot(slots, id);
2032 
2033 	if (!mem->memory_size) {
2034 		if (!old || !old->npages)
2035 			return -EINVAL;
2036 
2037 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
2038 			return -EIO;
2039 
2040 		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2041 	}
2042 
2043 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
2044 	npages = (mem->memory_size >> PAGE_SHIFT);
2045 
2046 	if (!old || !old->npages) {
2047 		change = KVM_MR_CREATE;
2048 
2049 		/*
2050 		 * To simplify KVM internals, the total number of pages across
2051 		 * all memslots must fit in an unsigned long.
2052 		 */
2053 		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2054 			return -EINVAL;
2055 	} else { /* Modify an existing slot. */
2056 		/* Private memslots are immutable, they can only be deleted. */
2057 		if (mem->flags & KVM_MEM_GUEST_MEMFD)
2058 			return -EINVAL;
2059 		if ((mem->userspace_addr != old->userspace_addr) ||
2060 		    (npages != old->npages) ||
2061 		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2062 			return -EINVAL;
2063 
2064 		if (base_gfn != old->base_gfn)
2065 			change = KVM_MR_MOVE;
2066 		else if (mem->flags != old->flags)
2067 			change = KVM_MR_FLAGS_ONLY;
2068 		else /* Nothing to change. */
2069 			return 0;
2070 	}
2071 
2072 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2073 	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2074 		return -EEXIST;
2075 
2076 	/* Allocate a slot that will persist in the memslots. */
2077 	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2078 	if (!new)
2079 		return -ENOMEM;
2080 
2081 	new->as_id = as_id;
2082 	new->id = id;
2083 	new->base_gfn = base_gfn;
2084 	new->npages = npages;
2085 	new->flags = mem->flags;
2086 	new->userspace_addr = mem->userspace_addr;
2087 	if (mem->flags & KVM_MEM_GUEST_MEMFD) {
2088 		r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
2089 		if (r)
2090 			goto out;
2091 	}
2092 
2093 	r = kvm_set_memslot(kvm, old, new, change);
2094 	if (r)
2095 		goto out_unbind;
2096 
2097 	return 0;
2098 
2099 out_unbind:
2100 	if (mem->flags & KVM_MEM_GUEST_MEMFD)
2101 		kvm_gmem_unbind(new);
2102 out:
2103 	kfree(new);
2104 	return r;
2105 }
2106 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2107 
2108 int kvm_set_memory_region(struct kvm *kvm,
2109 			  const struct kvm_userspace_memory_region2 *mem)
2110 {
2111 	int r;
2112 
2113 	mutex_lock(&kvm->slots_lock);
2114 	r = __kvm_set_memory_region(kvm, mem);
2115 	mutex_unlock(&kvm->slots_lock);
2116 	return r;
2117 }
2118 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
2119 
2120 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2121 					  struct kvm_userspace_memory_region2 *mem)
2122 {
2123 	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2124 		return -EINVAL;
2125 
2126 	return kvm_set_memory_region(kvm, mem);
2127 }
2128 
2129 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2130 /**
2131  * kvm_get_dirty_log - get a snapshot of dirty pages
2132  * @kvm:	pointer to kvm instance
2133  * @log:	slot id and address to which we copy the log
2134  * @is_dirty:	set to '1' if any dirty pages were found
2135  * @memslot:	set to the associated memslot, always valid on success
2136  */
2137 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2138 		      int *is_dirty, struct kvm_memory_slot **memslot)
2139 {
2140 	struct kvm_memslots *slots;
2141 	int i, as_id, id;
2142 	unsigned long n;
2143 	unsigned long any = 0;
2144 
2145 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2146 	if (!kvm_use_dirty_bitmap(kvm))
2147 		return -ENXIO;
2148 
2149 	*memslot = NULL;
2150 	*is_dirty = 0;
2151 
2152 	as_id = log->slot >> 16;
2153 	id = (u16)log->slot;
2154 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2155 		return -EINVAL;
2156 
2157 	slots = __kvm_memslots(kvm, as_id);
2158 	*memslot = id_to_memslot(slots, id);
2159 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
2160 		return -ENOENT;
2161 
2162 	kvm_arch_sync_dirty_log(kvm, *memslot);
2163 
2164 	n = kvm_dirty_bitmap_bytes(*memslot);
2165 
2166 	for (i = 0; !any && i < n/sizeof(long); ++i)
2167 		any = (*memslot)->dirty_bitmap[i];
2168 
2169 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2170 		return -EFAULT;
2171 
2172 	if (any)
2173 		*is_dirty = 1;
2174 	return 0;
2175 }
2176 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2177 
2178 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2179 /**
2180  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2181  *	and reenable dirty page tracking for the corresponding pages.
2182  * @kvm:	pointer to kvm instance
2183  * @log:	slot id and address to which we copy the log
2184  *
2185  * We need to keep in mind that VCPU threads can write to the bitmap
2186  * concurrently. So, to avoid losing track of dirty pages we keep the
2187  * following order:
2188  *
2189  *    1. Take a snapshot of the bit and clear it if needed.
2190  *    2. Write protect the corresponding page.
2191  *    3. Copy the snapshot to the userspace.
2192  *    4. Upon return caller flushes TLB's if needed.
2193  *
2194  * Between 2 and 4, the guest may write to the page using the remaining TLB
2195  * entry.  This is not a problem because the page is reported dirty using
2196  * the snapshot taken before and step 4 ensures that writes done after
2197  * exiting to userspace will be logged for the next call.
2198  *
2199  */
2200 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2201 {
2202 	struct kvm_memslots *slots;
2203 	struct kvm_memory_slot *memslot;
2204 	int i, as_id, id;
2205 	unsigned long n;
2206 	unsigned long *dirty_bitmap;
2207 	unsigned long *dirty_bitmap_buffer;
2208 	bool flush;
2209 
2210 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2211 	if (!kvm_use_dirty_bitmap(kvm))
2212 		return -ENXIO;
2213 
2214 	as_id = log->slot >> 16;
2215 	id = (u16)log->slot;
2216 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2217 		return -EINVAL;
2218 
2219 	slots = __kvm_memslots(kvm, as_id);
2220 	memslot = id_to_memslot(slots, id);
2221 	if (!memslot || !memslot->dirty_bitmap)
2222 		return -ENOENT;
2223 
2224 	dirty_bitmap = memslot->dirty_bitmap;
2225 
2226 	kvm_arch_sync_dirty_log(kvm, memslot);
2227 
2228 	n = kvm_dirty_bitmap_bytes(memslot);
2229 	flush = false;
2230 	if (kvm->manual_dirty_log_protect) {
2231 		/*
2232 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
2233 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2234 		 * is some code duplication between this function and
2235 		 * kvm_get_dirty_log, but hopefully all architectures will
2236 		 * transition to kvm_get_dirty_log_protect so that kvm_get_dirty_log
2237 		 * can be eliminated.
2238 		 */
2239 		dirty_bitmap_buffer = dirty_bitmap;
2240 	} else {
2241 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2242 		memset(dirty_bitmap_buffer, 0, n);
2243 
2244 		KVM_MMU_LOCK(kvm);
2245 		for (i = 0; i < n / sizeof(long); i++) {
2246 			unsigned long mask;
2247 			gfn_t offset;
2248 
2249 			if (!dirty_bitmap[i])
2250 				continue;
2251 
2252 			flush = true;
2253 			mask = xchg(&dirty_bitmap[i], 0);
2254 			dirty_bitmap_buffer[i] = mask;
2255 
2256 			offset = i * BITS_PER_LONG;
2257 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2258 								offset, mask);
2259 		}
2260 		KVM_MMU_UNLOCK(kvm);
2261 	}
2262 
2263 	if (flush)
2264 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2265 
2266 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2267 		return -EFAULT;
2268 	return 0;
2269 }
2270 
2271 
2272 /**
2273  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2274  * @kvm: kvm instance
2275  * @log: slot id and address to which we copy the log
2276  *
2277  * Steps 1-4 below provide a general overview of dirty page logging. See
2278  * kvm_get_dirty_log_protect() function description for additional details.
2279  *
2280  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2281  * always flush the TLB (step 4) even if a previous step failed and the dirty
2282  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2283  * API does not preclude a subsequent dirty log read by user space. Flushing
2284  * the TLB ensures writes will be marked dirty for the next log read.
2285  *
2286  *   1. Take a snapshot of the bit and clear it if needed.
2287  *   2. Write protect the corresponding page.
2288  *   3. Copy the snapshot to the userspace.
2289  *   4. Flush TLB's if needed.
2290  */
2291 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2292 				      struct kvm_dirty_log *log)
2293 {
2294 	int r;
2295 
2296 	mutex_lock(&kvm->slots_lock);
2297 
2298 	r = kvm_get_dirty_log_protect(kvm, log);
2299 
2300 	mutex_unlock(&kvm->slots_lock);
2301 	return r;
2302 }
2303 
2304 /**
2305  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2306  *	and reenable dirty page tracking for the corresponding pages.
2307  * @kvm:	pointer to kvm instance
2308  * @log:	slot id and address from which to fetch the bitmap of dirty pages
2309  */
2310 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2311 				       struct kvm_clear_dirty_log *log)
2312 {
2313 	struct kvm_memslots *slots;
2314 	struct kvm_memory_slot *memslot;
2315 	int as_id, id;
2316 	gfn_t offset;
2317 	unsigned long i, n;
2318 	unsigned long *dirty_bitmap;
2319 	unsigned long *dirty_bitmap_buffer;
2320 	bool flush;
2321 
2322 	/* Dirty ring tracking may be exclusive to dirty log tracking */
2323 	if (!kvm_use_dirty_bitmap(kvm))
2324 		return -ENXIO;
2325 
2326 	as_id = log->slot >> 16;
2327 	id = (u16)log->slot;
2328 	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
2329 		return -EINVAL;
2330 
2331 	if (log->first_page & 63)
2332 		return -EINVAL;
2333 
2334 	slots = __kvm_memslots(kvm, as_id);
2335 	memslot = id_to_memslot(slots, id);
2336 	if (!memslot || !memslot->dirty_bitmap)
2337 		return -ENOENT;
2338 
2339 	dirty_bitmap = memslot->dirty_bitmap;
2340 
2341 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2342 
2343 	if (log->first_page > memslot->npages ||
2344 	    log->num_pages > memslot->npages - log->first_page ||
2345 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2346 		return -EINVAL;
2347 
2348 	kvm_arch_sync_dirty_log(kvm, memslot);
2349 
2350 	flush = false;
2351 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2352 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2353 		return -EFAULT;
2354 
2355 	KVM_MMU_LOCK(kvm);
2356 	for (offset = log->first_page, i = offset / BITS_PER_LONG,
2357 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2358 	     i++, offset += BITS_PER_LONG) {
2359 		unsigned long mask = *dirty_bitmap_buffer++;
2360 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2361 		if (!mask)
2362 			continue;
2363 
2364 		mask &= atomic_long_fetch_andnot(mask, p);
2365 
2366 		/*
2367 		 * mask contains the bits that really have been cleared.  This
2368 		 * never includes any bits beyond the length of the memslot (if
2369 		 * the length is not aligned to 64 pages), therefore it is not
2370 		 * a problem if userspace sets them in log->dirty_bitmap.
2371 		 */
2372 		if (mask) {
2373 			flush = true;
2374 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2375 								offset, mask);
2376 		}
2377 	}
2378 	KVM_MMU_UNLOCK(kvm);
2379 
2380 	if (flush)
2381 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2382 
2383 	return 0;
2384 }
2385 
2386 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2387 					struct kvm_clear_dirty_log *log)
2388 {
2389 	int r;
2390 
2391 	mutex_lock(&kvm->slots_lock);
2392 
2393 	r = kvm_clear_dirty_log_protect(kvm, log);
2394 
2395 	mutex_unlock(&kvm->slots_lock);
2396 	return r;
2397 }
2398 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2399 
2400 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2401 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
2402 {
2403 	if (!kvm || kvm_arch_has_private_mem(kvm))
2404 		return KVM_MEMORY_ATTRIBUTE_PRIVATE;
2405 
2406 	return 0;
2407 }
2408 
2409 /*
2410  * Returns true if _all_ gfns in the range [@start, @end) have attributes
2411  * such that the bits in @mask match @attrs.
2412  */
2413 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2414 				     unsigned long mask, unsigned long attrs)
2415 {
2416 	XA_STATE(xas, &kvm->mem_attr_array, start);
2417 	unsigned long index;
2418 	void *entry;
2419 
2420 	mask &= kvm_supported_mem_attributes(kvm);
2421 	if (attrs & ~mask)
2422 		return false;
2423 
2424 	if (end == start + 1)
2425 		return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
2426 
2427 	guard(rcu)();
2428 	if (!attrs)
2429 		return !xas_find(&xas, end - 1);
2430 
2431 	for (index = start; index < end; index++) {
2432 		do {
2433 			entry = xas_next(&xas);
2434 		} while (xas_retry(&xas, entry));
2435 
2436 		if (xas.xa_index != index ||
2437 		    (xa_to_value(entry) & mask) != attrs)
2438 			return false;
2439 	}
2440 
2441 	return true;
2442 }
2443 
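/*
 * Invoke @range->handler on every memslot, in every address space, that
 * overlaps [@range->start, @range->end).  mmu_lock is acquired lazily, i.e.
 * only once an overlapping memslot is found, and remote TLBs are flushed if
 * requested and any handler invocation returns true.
 */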
2444 static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
2445 						 struct kvm_mmu_notifier_range *range)
2446 {
2447 	struct kvm_gfn_range gfn_range;
2448 	struct kvm_memory_slot *slot;
2449 	struct kvm_memslots *slots;
2450 	struct kvm_memslot_iter iter;
2451 	bool found_memslot = false;
2452 	bool ret = false;
2453 	int i;
2454 
2455 	gfn_range.arg = range->arg;
2456 	gfn_range.may_block = range->may_block;
2457 
2458 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
2459 		slots = __kvm_memslots(kvm, i);
2460 
2461 		kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
2462 			slot = iter.slot;
2463 			gfn_range.slot = slot;
2464 
2465 			gfn_range.start = max(range->start, slot->base_gfn);
2466 			gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
2467 			if (gfn_range.start >= gfn_range.end)
2468 				continue;
2469 
2470 			if (!found_memslot) {
2471 				found_memslot = true;
2472 				KVM_MMU_LOCK(kvm);
2473 				if (!IS_KVM_NULL_FN(range->on_lock))
2474 					range->on_lock(kvm);
2475 			}
2476 
2477 			ret |= range->handler(kvm, &gfn_range);
2478 		}
2479 	}
2480 
2481 	if (range->flush_on_ret && ret)
2482 		kvm_flush_remote_tlbs(kvm);
2483 
2484 	if (found_memslot)
2485 		KVM_MMU_UNLOCK(kvm);
2486 }
2487 
2488 static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
2489 					  struct kvm_gfn_range *range)
2490 {
2491 	/*
2492 	 * Unconditionally add the range to the invalidation set, regardless of
2493 	 * whether or not the arch callback actually needs to zap SPTEs.  E.g.
2494 	 * if KVM supports RWX attributes in the future and the attributes are
2495 	 * going from R=>RW, zapping isn't strictly necessary.  Unconditionally
2496 	 * adding the range allows KVM to require that MMU invalidations add at
2497 	 * least one range between begin() and end(), e.g. allows KVM to detect
2498 	 * bugs where the add() is missed.  Relaxing the rule *might* be safe,
2499 	 * but it's not obvious that allowing new mappings while the attributes
2500 	 * are in flux is desirable or worth the complexity.
2501 	 */
2502 	kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
2503 
2504 	return kvm_arch_pre_set_memory_attributes(kvm, range);
2505 }
2506 
2507 /* Set @attributes for the gfn range [@start, @end). */
2508 static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2509 				     unsigned long attributes)
2510 {
2511 	struct kvm_mmu_notifier_range pre_set_range = {
2512 		.start = start,
2513 		.end = end,
2514 		.handler = kvm_pre_set_memory_attributes,
2515 		.on_lock = kvm_mmu_invalidate_begin,
2516 		.flush_on_ret = true,
2517 		.may_block = true,
2518 	};
2519 	struct kvm_mmu_notifier_range post_set_range = {
2520 		.start = start,
2521 		.end = end,
2522 		.arg.attributes = attributes,
2523 		.handler = kvm_arch_post_set_memory_attributes,
2524 		.on_lock = kvm_mmu_invalidate_end,
2525 		.may_block = true,
2526 	};
2527 	unsigned long i;
2528 	void *entry;
2529 	int r = 0;
2530 
2531 	entry = attributes ? xa_mk_value(attributes) : NULL;
2532 
2533 	mutex_lock(&kvm->slots_lock);
2534 
2535 	/* Nothing to do if the entire range has the desired attributes. */
2536 	if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
2537 		goto out_unlock;
2538 
2539 	/*
2540 	 * Reserve memory ahead of time to avoid having to deal with failures
2541 	 * partway through setting the new attributes.
2542 	 */
2543 	for (i = start; i < end; i++) {
2544 		r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
2545 		if (r)
2546 			goto out_unlock;
2547 	}
2548 
2549 	kvm_handle_gfn_range(kvm, &pre_set_range);
2550 
2551 	for (i = start; i < end; i++) {
2552 		r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
2553 				    GFP_KERNEL_ACCOUNT));
2554 		KVM_BUG_ON(r, kvm);
2555 	}
2556 
2557 	kvm_handle_gfn_range(kvm, &post_set_range);
2558 
2559 out_unlock:
2560 	mutex_unlock(&kvm->slots_lock);
2561 
2562 	return r;
2563 }
2564 static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
2565 					   struct kvm_memory_attributes *attrs)
2566 {
2567 	gfn_t start, end;
2568 
2569 	/* flags is currently not used. */
2570 	if (attrs->flags)
2571 		return -EINVAL;
2572 	if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
2573 		return -EINVAL;
2574 	if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
2575 		return -EINVAL;
2576 	if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
2577 		return -EINVAL;
2578 
2579 	start = attrs->address >> PAGE_SHIFT;
2580 	end = (attrs->address + attrs->size) >> PAGE_SHIFT;
2581 
2582 	/*
2583 	 * xarray tracks data using "unsigned long", and as a result so does
2584 	 * KVM.  For simplicity, generic attributes are supported only on 64-bit
2585 	 * architectures.
2586 	 */
2587 	BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
2588 
2589 	return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
2590 }
2591 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2592 
2593 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2594 {
2595 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2596 }
2597 EXPORT_SYMBOL_GPL(gfn_to_memslot);
2598 
2599 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2600 {
2601 	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2602 	u64 gen = slots->generation;
2603 	struct kvm_memory_slot *slot;
2604 
2605 	/*
2606 	 * This also protects against using a memslot from a different address space,
2607 	 * since different address spaces have different generation numbers.
2608 	 */
2609 	if (unlikely(gen != vcpu->last_used_slot_gen)) {
2610 		vcpu->last_used_slot = NULL;
2611 		vcpu->last_used_slot_gen = gen;
2612 	}
2613 
2614 	slot = try_get_memslot(vcpu->last_used_slot, gfn);
2615 	if (slot)
2616 		return slot;
2617 
2618 	/*
2619 	 * Fall back to searching all memslots. We purposely use
2620 	 * search_memslots() instead of __gfn_to_memslot() to avoid
2621 	 * thrashing the VM-wide last_used_slot in kvm_memslots.
2622 	 */
2623 	slot = search_memslots(slots, gfn, false);
2624 	if (slot) {
2625 		vcpu->last_used_slot = slot;
2626 		return slot;
2627 	}
2628 
2629 	return NULL;
2630 }
2631 
2632 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2633 {
2634 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2635 
2636 	return kvm_is_visible_memslot(memslot);
2637 }
2638 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2639 
2640 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2641 {
2642 	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2643 
2644 	return kvm_is_visible_memslot(memslot);
2645 }
2646 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2647 
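/*
 * Return the size of the host page backing @gfn's userspace mapping, falling
 * back to PAGE_SIZE if the hva is invalid or isn't covered by a VMA.
 */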
2648 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2649 {
2650 	struct vm_area_struct *vma;
2651 	unsigned long addr, size;
2652 
2653 	size = PAGE_SIZE;
2654 
2655 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2656 	if (kvm_is_error_hva(addr))
2657 		return PAGE_SIZE;
2658 
2659 	mmap_read_lock(current->mm);
2660 	vma = find_vma(current->mm, addr);
2661 	if (!vma)
2662 		goto out;
2663 
2664 	size = vma_kernel_pagesize(vma);
2665 
2666 out:
2667 	mmap_read_unlock(current->mm);
2668 
2669 	return size;
2670 }
2671 
2672 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2673 {
2674 	return slot->flags & KVM_MEM_READONLY;
2675 }
2676 
2677 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2678 				       gfn_t *nr_pages, bool write)
2679 {
2680 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2681 		return KVM_HVA_ERR_BAD;
2682 
2683 	if (memslot_is_readonly(slot) && write)
2684 		return KVM_HVA_ERR_RO_BAD;
2685 
2686 	if (nr_pages)
2687 		*nr_pages = slot->npages - (gfn - slot->base_gfn);
2688 
2689 	return __gfn_to_hva_memslot(slot, gfn);
2690 }
2691 
2692 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2693 				     gfn_t *nr_pages)
2694 {
2695 	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2696 }
2697 
2698 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2699 					gfn_t gfn)
2700 {
2701 	return gfn_to_hva_many(slot, gfn, NULL);
2702 }
2703 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2704 
2705 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2706 {
2707 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2708 }
2709 EXPORT_SYMBOL_GPL(gfn_to_hva);
2710 
2711 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2712 {
2713 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2714 }
2715 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2716 
2717 /*
2718  * Return the hva of a @gfn and the R/W attribute if possible.
2719  *
2720  * @slot: the kvm_memory_slot which contains @gfn
2721  * @gfn: the gfn to be translated
2722  * @writable: used to return the read/write attribute of the @slot if the hva
2723  * is valid and @writable is not NULL
2724  */
2725 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2726 				      gfn_t gfn, bool *writable)
2727 {
2728 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2729 
2730 	if (!kvm_is_error_hva(hva) && writable)
2731 		*writable = !memslot_is_readonly(slot);
2732 
2733 	return hva;
2734 }
2735 
2736 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2737 {
2738 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2739 
2740 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2741 }
2742 
2743 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2744 {
2745 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2746 
2747 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
2748 }
2749 
2750 static inline int check_user_page_hwpoison(unsigned long addr)
2751 {
2752 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2753 
2754 	rc = get_user_pages(addr, 1, flags, NULL);
2755 	return rc == -EHWPOISON;
2756 }
2757 
2758 /*
2759  * The fast path to get the writable pfn, which will be stored in @pfn;
2760  * true indicates success, otherwise false is returned.  It's also the
2761  * only part that can run in atomic context.
2762  */
2763 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2764 			    bool *writable, kvm_pfn_t *pfn)
2765 {
2766 	struct page *page[1];
2767 
2768 	/*
2769 	 * Fast pin a writable pfn only if it is a write fault request
2770 	 * or the caller allows mapping a writable pfn for a read fault
2771 	 * request.
2772 	 */
2773 	if (!(write_fault || writable))
2774 		return false;
2775 
2776 	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2777 		*pfn = page_to_pfn(page[0]);
2778 
2779 		if (writable)
2780 			*writable = true;
2781 		return true;
2782 	}
2783 
2784 	return false;
2785 }
2786 
2787 /*
2788  * The slow path to get the pfn of the specified host virtual address,
2789  * 1 indicates success, -errno is returned if error is detected.
2790  */
2791 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2792 			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
2793 {
2794 	/*
2795 	 * When a VCPU accesses a page that is not mapped into the secondary
2796 	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
2797 	 * make progress. We always want to honor NUMA hinting faults in that
2798 	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2799 	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
2800 	 * mapped into the secondary MMU and gets accessed by a VCPU.
2801 	 *
2802 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2803 	 * implicitly honor NUMA hinting faults and don't need this flag.
2804 	 */
2805 	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2806 	struct page *page;
2807 	int npages;
2808 
2809 	might_sleep();
2810 
2811 	if (writable)
2812 		*writable = write_fault;
2813 
2814 	if (write_fault)
2815 		flags |= FOLL_WRITE;
2816 	if (async)
2817 		flags |= FOLL_NOWAIT;
2818 	if (interruptible)
2819 		flags |= FOLL_INTERRUPTIBLE;
2820 
2821 	npages = get_user_pages_unlocked(addr, 1, &page, flags);
2822 	if (npages != 1)
2823 		return npages;
2824 
2825 	/* map read fault as writable if possible */
2826 	if (unlikely(!write_fault) && writable) {
2827 		struct page *wpage;
2828 
2829 		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2830 			*writable = true;
2831 			put_page(page);
2832 			page = wpage;
2833 		}
2834 	}
2835 	*pfn = page_to_pfn(page);
2836 	return npages;
2837 }
2838 
2839 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2840 {
2841 	if (unlikely(!(vma->vm_flags & VM_READ)))
2842 		return false;
2843 
2844 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2845 		return false;
2846 
2847 	return true;
2848 }
2849 
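/*
 * Try to grab a reference on the page backing @pfn.  pfns that aren't backed
 * by a refcounted page trivially succeed, as there is nothing to pin.
 */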
2850 static int kvm_try_get_pfn(kvm_pfn_t pfn)
2851 {
2852 	struct page *page = kvm_pfn_to_refcounted_page(pfn);
2853 
2854 	if (!page)
2855 		return 1;
2856 
2857 	return get_page_unless_zero(page);
2858 }
2859 
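/*
 * Resolve an hva in a VM_IO or VM_PFNMAP vma to a pfn by walking the host
 * page tables directly, using fixup_user_fault() to fault in the mapping if
 * it isn't already present.  A reference is taken on refcounted pfns so that
 * the caller's eventual release is balanced.
 */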
2860 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2861 			       unsigned long addr, bool write_fault,
2862 			       bool *writable, kvm_pfn_t *p_pfn)
2863 {
2864 	kvm_pfn_t pfn;
2865 	pte_t *ptep;
2866 	pte_t pte;
2867 	spinlock_t *ptl;
2868 	int r;
2869 
2870 	r = follow_pte(vma, addr, &ptep, &ptl);
2871 	if (r) {
2872 		/*
2873 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2874 		 * not call the fault handler, so do it here.
2875 		 */
2876 		bool unlocked = false;
2877 		r = fixup_user_fault(current->mm, addr,
2878 				     (write_fault ? FAULT_FLAG_WRITE : 0),
2879 				     &unlocked);
2880 		if (unlocked)
2881 			return -EAGAIN;
2882 		if (r)
2883 			return r;
2884 
2885 		r = follow_pte(vma, addr, &ptep, &ptl);
2886 		if (r)
2887 			return r;
2888 	}
2889 
2890 	pte = ptep_get(ptep);
2891 
2892 	if (write_fault && !pte_write(pte)) {
2893 		pfn = KVM_PFN_ERR_RO_FAULT;
2894 		goto out;
2895 	}
2896 
2897 	if (writable)
2898 		*writable = pte_write(pte);
2899 	pfn = pte_pfn(pte);
2900 
2901 	/*
2902 	 * Get a reference here because callers of *hva_to_pfn* and
2903 	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2904 	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
2905 	 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2906 	 * simply do nothing for reserved pfns.
2907 	 *
2908 	 * Whoever called remap_pfn_range is also going to call e.g.
2909 	 * unmap_mapping_range before the underlying pages are freed,
2910 	 * causing a call to our MMU notifier.
2911 	 *
2912 	 * Certain IO or PFNMAP mappings can be backed with valid
2913 	 * struct pages, but be allocated without refcounting e.g.,
2914 	 * tail pages of non-compound higher order allocations, which
2915 	 * would then underflow the refcount when the caller does the
2916 	 * required put_page. Don't allow those pages here.
2917 	 */
2918 	if (!kvm_try_get_pfn(pfn))
2919 		r = -EFAULT;
2920 
2921 out:
2922 	pte_unmap_unlock(ptep, ptl);
2923 	*p_pfn = pfn;
2924 
2925 	return r;
2926 }
2927 
2928 /*
2929  * Pin guest page in memory and return its pfn.
2930  * @addr: host virtual address which maps memory to the guest
2931  * @atomic: whether this function is forbidden from sleeping
2932  * @interruptible: whether the process can be interrupted by non-fatal signals
2933  * @async: whether this function needs to wait for I/O to complete if the
2934  *         host page is not in memory
2935  * @write_fault: whether we should get a writable host page
2936  * @writable: whether mapping a writable host page is allowed for !@write_fault
2937  *
2938  * The function will map a writable host page for these two cases:
2939  * 1): @write_fault = true
2940  * 2): @write_fault = false && @writable, @writable will tell the caller
2941  *     whether the mapping is writable.
2942  */
2943 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2944 		     bool *async, bool write_fault, bool *writable)
2945 {
2946 	struct vm_area_struct *vma;
2947 	kvm_pfn_t pfn;
2948 	int npages, r;
2949 
2950 	/* we can do it either atomically or asynchronously, not both */
2951 	BUG_ON(atomic && async);
2952 
2953 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2954 		return pfn;
2955 
2956 	if (atomic)
2957 		return KVM_PFN_ERR_FAULT;
2958 
2959 	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2960 				 writable, &pfn);
2961 	if (npages == 1)
2962 		return pfn;
2963 	if (npages == -EINTR)
2964 		return KVM_PFN_ERR_SIGPENDING;
2965 
2966 	mmap_read_lock(current->mm);
2967 	if (npages == -EHWPOISON ||
2968 	      (!async && check_user_page_hwpoison(addr))) {
2969 		pfn = KVM_PFN_ERR_HWPOISON;
2970 		goto exit;
2971 	}
2972 
2973 retry:
2974 	vma = vma_lookup(current->mm, addr);
2975 
2976 	if (vma == NULL)
2977 		pfn = KVM_PFN_ERR_FAULT;
2978 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2979 		r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2980 		if (r == -EAGAIN)
2981 			goto retry;
2982 		if (r < 0)
2983 			pfn = KVM_PFN_ERR_FAULT;
2984 	} else {
2985 		if (async && vma_is_valid(vma, write_fault))
2986 			*async = true;
2987 		pfn = KVM_PFN_ERR_FAULT;
2988 	}
2989 exit:
2990 	mmap_read_unlock(current->mm);
2991 	return pfn;
2992 }
2993 
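/*
 * Translate @gfn in @slot to a pfn via gfn=>hva=>pfn.  Failures are reported
 * as dedicated error pfns (KVM_PFN_NOSLOT, KVM_PFN_ERR_RO_FAULT, etc.), and
 * @writable is forced to false for read-only memslots.
 */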
2994 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2995 			       bool atomic, bool interruptible, bool *async,
2996 			       bool write_fault, bool *writable, hva_t *hva)
2997 {
2998 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2999 
3000 	if (hva)
3001 		*hva = addr;
3002 
3003 	if (kvm_is_error_hva(addr)) {
3004 		if (writable)
3005 			*writable = false;
3006 
3007 		return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
3008 						    KVM_PFN_NOSLOT;
3009 	}
3010 
3011 	/* Do not map a writable pfn in a read-only memslot. */
3012 	if (writable && memslot_is_readonly(slot)) {
3013 		*writable = false;
3014 		writable = NULL;
3015 	}
3016 
3017 	return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
3018 			  writable);
3019 }
3020 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
3021 
3022 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
3023 		      bool *writable)
3024 {
3025 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
3026 				    NULL, write_fault, writable, NULL);
3027 }
3028 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
3029 
3030 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
3031 {
3032 	return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
3033 				    NULL, NULL);
3034 }
3035 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
3036 
3037 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
3038 {
3039 	return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
3040 				    NULL, NULL);
3041 }
3042 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
3043 
3044 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
3045 {
3046 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3047 }
3048 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
3049 
3050 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
3051 {
3052 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
3053 }
3054 EXPORT_SYMBOL_GPL(gfn_to_pfn);
3055 
3056 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
3057 {
3058 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
3059 }
3060 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
3061 
3062 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3063 			    struct page **pages, int nr_pages)
3064 {
3065 	unsigned long addr;
3066 	gfn_t entry = 0;
3067 
3068 	addr = gfn_to_hva_many(slot, gfn, &entry);
3069 	if (kvm_is_error_hva(addr))
3070 		return -1;
3071 
3072 	if (entry < nr_pages)
3073 		return 0;
3074 
3075 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
3076 }
3077 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
3078 
3079 /*
3080  * Do not use this helper unless you are absolutely certain the gfn _must_ be
3081  * backed by 'struct page'.  A valid example is if the backing memslot is
3082  * controlled by KVM.  Note, if the returned page is valid, its refcount has
3083  * been elevated by gfn_to_pfn().
3084  */
3085 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
3086 {
3087 	struct page *page;
3088 	kvm_pfn_t pfn;
3089 
3090 	pfn = gfn_to_pfn(kvm, gfn);
3091 
3092 	if (is_error_noslot_pfn(pfn))
3093 		return KVM_ERR_PTR_BAD_PAGE;
3094 
3095 	page = kvm_pfn_to_refcounted_page(pfn);
3096 	if (!page)
3097 		return KVM_ERR_PTR_BAD_PAGE;
3098 
3099 	return page;
3100 }
3101 EXPORT_SYMBOL_GPL(gfn_to_page);
3102 
3103 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
3104 {
3105 	if (dirty)
3106 		kvm_release_pfn_dirty(pfn);
3107 	else
3108 		kvm_release_pfn_clean(pfn);
3109 }
3110 
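/*
 * Map @gfn into the kernel address space for direct access: pfns backed by
 * "struct page" are mapped with kmap(), anything else (e.g. MMIO remappings)
 * falls back to memremap() when CONFIG_HAS_IOMEM is enabled.
 */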
3111 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
3112 {
3113 	kvm_pfn_t pfn;
3114 	void *hva = NULL;
3115 	struct page *page = KVM_UNMAPPED_PAGE;
3116 
3117 	if (!map)
3118 		return -EINVAL;
3119 
3120 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
3121 	if (is_error_noslot_pfn(pfn))
3122 		return -EINVAL;
3123 
3124 	if (pfn_valid(pfn)) {
3125 		page = pfn_to_page(pfn);
3126 		hva = kmap(page);
3127 #ifdef CONFIG_HAS_IOMEM
3128 	} else {
3129 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
3130 #endif
3131 	}
3132 
3133 	if (!hva)
3134 		return -EFAULT;
3135 
3136 	map->page = page;
3137 	map->hva = hva;
3138 	map->pfn = pfn;
3139 	map->gfn = gfn;
3140 
3141 	return 0;
3142 }
3143 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
3144 
3145 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
3146 {
3147 	if (!map)
3148 		return;
3149 
3150 	if (!map->hva)
3151 		return;
3152 
3153 	if (map->page != KVM_UNMAPPED_PAGE)
3154 		kunmap(map->page);
3155 #ifdef CONFIG_HAS_IOMEM
3156 	else
3157 		memunmap(map->hva);
3158 #endif
3159 
3160 	if (dirty)
3161 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
3162 
3163 	kvm_release_pfn(map->pfn, dirty);
3164 
3165 	map->hva = NULL;
3166 	map->page = NULL;
3167 }
3168 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
3169 
3170 static bool kvm_is_ad_tracked_page(struct page *page)
3171 {
3172 	/*
3173 	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
3174 	 * touched (e.g. set dirty) except by its owner".
3175 	 */
3176 	return !PageReserved(page);
3177 }
3178 
3179 static void kvm_set_page_dirty(struct page *page)
3180 {
3181 	if (kvm_is_ad_tracked_page(page))
3182 		SetPageDirty(page);
3183 }
3184 
3185 static void kvm_set_page_accessed(struct page *page)
3186 {
3187 	if (kvm_is_ad_tracked_page(page))
3188 		mark_page_accessed(page);
3189 }
3190 
3191 void kvm_release_page_clean(struct page *page)
3192 {
3193 	WARN_ON(is_error_page(page));
3194 
3195 	kvm_set_page_accessed(page);
3196 	put_page(page);
3197 }
3198 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
3199 
3200 void kvm_release_pfn_clean(kvm_pfn_t pfn)
3201 {
3202 	struct page *page;
3203 
3204 	if (is_error_noslot_pfn(pfn))
3205 		return;
3206 
3207 	page = kvm_pfn_to_refcounted_page(pfn);
3208 	if (!page)
3209 		return;
3210 
3211 	kvm_release_page_clean(page);
3212 }
3213 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
3214 
3215 void kvm_release_page_dirty(struct page *page)
3216 {
3217 	WARN_ON(is_error_page(page));
3218 
3219 	kvm_set_page_dirty(page);
3220 	kvm_release_page_clean(page);
3221 }
3222 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
3223 
3224 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
3225 {
3226 	struct page *page;
3227 
3228 	if (is_error_noslot_pfn(pfn))
3229 		return;
3230 
3231 	page = kvm_pfn_to_refcounted_page(pfn);
3232 	if (!page)
3233 		return;
3234 
3235 	kvm_release_page_dirty(page);
3236 }
3237 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
3238 
3239 /*
3240  * Note, checking for an error/noslot pfn is the caller's responsibility when
3241  * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
3242  * "set" helpers are not to be used when the pfn might point at garbage.
3243  */
3244 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
3245 {
3246 	if (WARN_ON(is_error_noslot_pfn(pfn)))
3247 		return;
3248 
3249 	if (pfn_valid(pfn))
3250 		kvm_set_page_dirty(pfn_to_page(pfn));
3251 }
3252 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
3253 
3254 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
3255 {
3256 	if (WARN_ON(is_error_noslot_pfn(pfn)))
3257 		return;
3258 
3259 	if (pfn_valid(pfn))
3260 		kvm_set_page_accessed(pfn_to_page(pfn));
3261 }
3262 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
3263 
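/*
 * Return the number of bytes, at most @len, that can be copied before
 * crossing a page boundary when starting at @offset within a page.
 */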
3264 static int next_segment(unsigned long len, int offset)
3265 {
3266 	if (len > PAGE_SIZE - offset)
3267 		return PAGE_SIZE - offset;
3268 	else
3269 		return len;
3270 }
3271 
3272 /* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
3273 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
3274 				 void *data, int offset, int len)
3275 {
3276 	int r;
3277 	unsigned long addr;
3278 
3279 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3280 	if (kvm_is_error_hva(addr))
3281 		return -EFAULT;
3282 	r = __copy_from_user(data, (void __user *)addr + offset, len);
3283 	if (r)
3284 		return -EFAULT;
3285 	return 0;
3286 }
3287 
3288 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
3289 			int len)
3290 {
3291 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3292 
3293 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3294 }
3295 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
3296 
3297 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3298 			     int offset, int len)
3299 {
3300 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3301 
3302 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
3303 }
3304 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
3305 
3306 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3307 {
3308 	gfn_t gfn = gpa >> PAGE_SHIFT;
3309 	int seg;
3310 	int offset = offset_in_page(gpa);
3311 	int ret;
3312 
3313 	while ((seg = next_segment(len, offset)) != 0) {
3314 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3315 		if (ret < 0)
3316 			return ret;
3317 		offset = 0;
3318 		len -= seg;
3319 		data += seg;
3320 		++gfn;
3321 	}
3322 	return 0;
3323 }
3324 EXPORT_SYMBOL_GPL(kvm_read_guest);
3325 
3326 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3327 {
3328 	gfn_t gfn = gpa >> PAGE_SHIFT;
3329 	int seg;
3330 	int offset = offset_in_page(gpa);
3331 	int ret;
3332 
3333 	while ((seg = next_segment(len, offset)) != 0) {
3334 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3335 		if (ret < 0)
3336 			return ret;
3337 		offset = 0;
3338 		len -= seg;
3339 		data += seg;
3340 		++gfn;
3341 	}
3342 	return 0;
3343 }
3344 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3345 
3346 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3347 			           void *data, int offset, unsigned long len)
3348 {
3349 	int r;
3350 	unsigned long addr;
3351 
3352 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3353 	if (kvm_is_error_hva(addr))
3354 		return -EFAULT;
3355 	pagefault_disable();
3356 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3357 	pagefault_enable();
3358 	if (r)
3359 		return -EFAULT;
3360 	return 0;
3361 }
3362 
3363 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3364 			       void *data, unsigned long len)
3365 {
3366 	gfn_t gfn = gpa >> PAGE_SHIFT;
3367 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3368 	int offset = offset_in_page(gpa);
3369 
3370 	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3371 }
3372 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3373 
3374 /* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
3375 static int __kvm_write_guest_page(struct kvm *kvm,
3376 				  struct kvm_memory_slot *memslot, gfn_t gfn,
3377 			          const void *data, int offset, int len)
3378 {
3379 	int r;
3380 	unsigned long addr;
3381 
3382 	addr = gfn_to_hva_memslot(memslot, gfn);
3383 	if (kvm_is_error_hva(addr))
3384 		return -EFAULT;
3385 	r = __copy_to_user((void __user *)addr + offset, data, len);
3386 	if (r)
3387 		return -EFAULT;
3388 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3389 	return 0;
3390 }
3391 
3392 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3393 			 const void *data, int offset, int len)
3394 {
3395 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3396 
3397 	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3398 }
3399 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3400 
3401 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3402 			      const void *data, int offset, int len)
3403 {
3404 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3405 
3406 	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3407 }
3408 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3409 
3410 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3411 		    unsigned long len)
3412 {
3413 	gfn_t gfn = gpa >> PAGE_SHIFT;
3414 	int seg;
3415 	int offset = offset_in_page(gpa);
3416 	int ret;
3417 
3418 	while ((seg = next_segment(len, offset)) != 0) {
3419 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3420 		if (ret < 0)
3421 			return ret;
3422 		offset = 0;
3423 		len -= seg;
3424 		data += seg;
3425 		++gfn;
3426 	}
3427 	return 0;
3428 }
3429 EXPORT_SYMBOL_GPL(kvm_write_guest);
3430 
3431 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3432 		         unsigned long len)
3433 {
3434 	gfn_t gfn = gpa >> PAGE_SHIFT;
3435 	int seg;
3436 	int offset = offset_in_page(gpa);
3437 	int ret;
3438 
3439 	while ((seg = next_segment(len, offset)) != 0) {
3440 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3441 		if (ret < 0)
3442 			return ret;
3443 		offset = 0;
3444 		len -= seg;
3445 		data += seg;
3446 		++gfn;
3447 	}
3448 	return 0;
3449 }
3450 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3451 
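/*
 * (Re)initialize a gfn_to_hva_cache for [@gpa, @gpa + @len).  The hva is
 * precomputed only for regions contained in a single page; multi-page
 * regions are validated here but force the cached read/write helpers onto
 * the slow, uncached path.
 */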
3452 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3453 				       struct gfn_to_hva_cache *ghc,
3454 				       gpa_t gpa, unsigned long len)
3455 {
3456 	int offset = offset_in_page(gpa);
3457 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
3458 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3459 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3460 	gfn_t nr_pages_avail;
3461 
3462 	/* Update ghc->generation before performing any error checks. */
3463 	ghc->generation = slots->generation;
3464 
3465 	if (start_gfn > end_gfn) {
3466 		ghc->hva = KVM_HVA_ERR_BAD;
3467 		return -EINVAL;
3468 	}
3469 
3470 	/*
3471 	 * If the requested region crosses two memslots, we still
3472 	 * verify that the entire region is valid here.
3473 	 */
3474 	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3475 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3476 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3477 					   &nr_pages_avail);
3478 		if (kvm_is_error_hva(ghc->hva))
3479 			return -EFAULT;
3480 	}
3481 
3482 	/* Use the slow path for cross page reads and writes. */
3483 	if (nr_pages_needed == 1)
3484 		ghc->hva += offset;
3485 	else
3486 		ghc->memslot = NULL;
3487 
3488 	ghc->gpa = gpa;
3489 	ghc->len = len;
3490 	return 0;
3491 }
3492 
3493 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3494 			      gpa_t gpa, unsigned long len)
3495 {
3496 	struct kvm_memslots *slots = kvm_memslots(kvm);
3497 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3498 }
3499 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3500 
3501 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3502 				  void *data, unsigned int offset,
3503 				  unsigned long len)
3504 {
3505 	struct kvm_memslots *slots = kvm_memslots(kvm);
3506 	int r;
3507 	gpa_t gpa = ghc->gpa + offset;
3508 
3509 	if (WARN_ON_ONCE(len + offset > ghc->len))
3510 		return -EINVAL;
3511 
3512 	if (slots->generation != ghc->generation) {
3513 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3514 			return -EFAULT;
3515 	}
3516 
3517 	if (kvm_is_error_hva(ghc->hva))
3518 		return -EFAULT;
3519 
3520 	if (unlikely(!ghc->memslot))
3521 		return kvm_write_guest(kvm, gpa, data, len);
3522 
3523 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3524 	if (r)
3525 		return -EFAULT;
3526 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3527 
3528 	return 0;
3529 }
3530 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3531 
3532 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3533 			   void *data, unsigned long len)
3534 {
3535 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3536 }
3537 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3538 
3539 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3540 				 void *data, unsigned int offset,
3541 				 unsigned long len)
3542 {
3543 	struct kvm_memslots *slots = kvm_memslots(kvm);
3544 	int r;
3545 	gpa_t gpa = ghc->gpa + offset;
3546 
3547 	if (WARN_ON_ONCE(len + offset > ghc->len))
3548 		return -EINVAL;
3549 
3550 	if (slots->generation != ghc->generation) {
3551 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3552 			return -EFAULT;
3553 	}
3554 
3555 	if (kvm_is_error_hva(ghc->hva))
3556 		return -EFAULT;
3557 
3558 	if (unlikely(!ghc->memslot))
3559 		return kvm_read_guest(kvm, gpa, data, len);
3560 
3561 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3562 	if (r)
3563 		return -EFAULT;
3564 
3565 	return 0;
3566 }
3567 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3568 
3569 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3570 			  void *data, unsigned long len)
3571 {
3572 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3573 }
3574 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3575 
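/*
 * Zero @len bytes of guest memory starting at @gpa by copying from the
 * kernel's zero page, one page-sized segment at a time.
 */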
3576 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3577 {
3578 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3579 	gfn_t gfn = gpa >> PAGE_SHIFT;
3580 	int seg;
3581 	int offset = offset_in_page(gpa);
3582 	int ret;
3583 
3584 	while ((seg = next_segment(len, offset)) != 0) {
3585 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3586 		if (ret < 0)
3587 			return ret;
3588 		offset = 0;
3589 		len -= seg;
3590 		++gfn;
3591 	}
3592 	return 0;
3593 }
3594 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3595 
3596 void mark_page_dirty_in_slot(struct kvm *kvm,
3597 			     const struct kvm_memory_slot *memslot,
3598 			     gfn_t gfn)
3599 {
3600 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3601 
3602 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3603 	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
3604 		return;
3605 
3606 	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
3607 #endif
3608 
3609 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3610 		unsigned long rel_gfn = gfn - memslot->base_gfn;
3611 		u32 slot = (memslot->as_id << 16) | memslot->id;
3612 
3613 		if (kvm->dirty_ring_size && vcpu)
3614 			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
3615 		else if (memslot->dirty_bitmap)
3616 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
3617 	}
3618 }
3619 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3620 
3621 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3622 {
3623 	struct kvm_memory_slot *memslot;
3624 
3625 	memslot = gfn_to_memslot(kvm, gfn);
3626 	mark_page_dirty_in_slot(kvm, memslot, gfn);
3627 }
3628 EXPORT_SYMBOL_GPL(mark_page_dirty);
3629 
3630 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3631 {
3632 	struct kvm_memory_slot *memslot;
3633 
3634 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3635 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3636 }
3637 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3638 
3639 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3640 {
3641 	if (!vcpu->sigset_active)
3642 		return;
3643 
3644 	/*
3645 	 * This does a lockless modification of ->real_blocked, which is fine
3646 	 * because only current can change ->real_blocked and all readers of
3647 	 * ->real_blocked don't care as long as ->real_blocked is always a subset
3648 	 * of ->blocked.
3649 	 */
3650 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3651 }
3652 
3653 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3654 {
3655 	if (!vcpu->sigset_active)
3656 		return;
3657 
3658 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3659 	sigemptyset(&current->real_blocked);
3660 }
3661 
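/*
 * Grow the vCPU's halt-polling interval by the halt_poll_ns_grow factor,
 * starting from at least halt_poll_ns_grow_start.  A grow factor of 0
 * disables growing and leaves the current value untouched.
 */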
3662 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3663 {
3664 	unsigned int old, val, grow, grow_start;
3665 
3666 	old = val = vcpu->halt_poll_ns;
3667 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3668 	grow = READ_ONCE(halt_poll_ns_grow);
3669 	if (!grow)
3670 		goto out;
3671 
3672 	val *= grow;
3673 	if (val < grow_start)
3674 		val = grow_start;
3675 
3676 	vcpu->halt_poll_ns = val;
3677 out:
3678 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3679 }
3680 
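/*
 * Shrink the vCPU's halt-polling interval by the halt_poll_ns_shrink divisor.
 * A shrink value of 0, or a result below halt_poll_ns_grow_start, disables
 * polling entirely by resetting the interval to 0.
 */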
3681 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3682 {
3683 	unsigned int old, val, shrink, grow_start;
3684 
3685 	old = val = vcpu->halt_poll_ns;
3686 	shrink = READ_ONCE(halt_poll_ns_shrink);
3687 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3688 	if (shrink == 0)
3689 		val = 0;
3690 	else
3691 		val /= shrink;
3692 
3693 	if (val < grow_start)
3694 		val = 0;
3695 
3696 	vcpu->halt_poll_ns = val;
3697 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3698 }
3699 
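/*
 * Returns 0 if the vCPU should continue blocking, or -EINTR if blocking must
 * stop, i.e. if the vCPU is runnable, a timer or signal is pending, or
 * KVM_REQ_UNBLOCK has been set.
 */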
3700 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3701 {
3702 	int ret = -EINTR;
3703 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3704 
3705 	if (kvm_arch_vcpu_runnable(vcpu))
3706 		goto out;
3707 	if (kvm_cpu_has_pending_timer(vcpu))
3708 		goto out;
3709 	if (signal_pending(current))
3710 		goto out;
3711 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3712 		goto out;
3713 
3714 	ret = 0;
3715 out:
3716 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3717 	return ret;
3718 }
3719 
3720 /*
3721  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3722  * pending.  This is mostly used when halting a vCPU, but may also be used
3723  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3724  */
3725 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3726 {
3727 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3728 	bool waited = false;
3729 
3730 	vcpu->stat.generic.blocking = 1;
3731 
3732 	preempt_disable();
3733 	kvm_arch_vcpu_blocking(vcpu);
3734 	prepare_to_rcuwait(wait);
3735 	preempt_enable();
3736 
3737 	for (;;) {
3738 		set_current_state(TASK_INTERRUPTIBLE);
3739 
3740 		if (kvm_vcpu_check_block(vcpu) < 0)
3741 			break;
3742 
3743 		waited = true;
3744 		schedule();
3745 	}
3746 
3747 	preempt_disable();
3748 	finish_rcuwait(wait);
3749 	kvm_arch_vcpu_unblocking(vcpu);
3750 	preempt_enable();
3751 
3752 	vcpu->stat.generic.blocking = 0;
3753 
3754 	return waited;
3755 }
3756 
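/*
 * Account the time spent halt-polling between @start and @end to the vCPU's
 * generic stats, splitting it into the success or failure counters and
 * histograms depending on whether polling avoided a full block.
 */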
3757 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3758 					  ktime_t end, bool success)
3759 {
3760 	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3761 	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3762 
3763 	++vcpu->stat.generic.halt_attempted_poll;
3764 
3765 	if (success) {
3766 		++vcpu->stat.generic.halt_successful_poll;
3767 
3768 		if (!vcpu_valid_wakeup(vcpu))
3769 			++vcpu->stat.generic.halt_poll_invalid;
3770 
3771 		stats->halt_poll_success_ns += poll_ns;
3772 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3773 	} else {
3774 		stats->halt_poll_fail_ns += poll_ns;
3775 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3776 	}
3777 }
3778 
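/*
 * Returns the per-VM halt-polling limit if userspace overrode it via
 * KVM_CAP_HALT_POLL, otherwise the module-wide halt_poll_ns value.
 */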
3779 static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
3780 {
3781 	struct kvm *kvm = vcpu->kvm;
3782 
3783 	if (kvm->override_halt_poll_ns) {
3784 		/*
3785 		 * Ensure kvm->max_halt_poll_ns is not read before
3786 		 * kvm->override_halt_poll_ns.
3787 		 *
3788 		 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
3789 		 */
3790 		smp_rmb();
3791 		return READ_ONCE(kvm->max_halt_poll_ns);
3792 	}
3793 
3794 	return READ_ONCE(halt_poll_ns);
3795 }
3796 
3797 /*
3798  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3799  * polling is enabled, busy wait for a short time before blocking to avoid the
3800  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3801  * is halted.
3802  */
3803 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3804 {
3805 	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3806 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3807 	ktime_t start, cur, poll_end;
3808 	bool waited = false;
3809 	bool do_halt_poll;
3810 	u64 halt_ns;
3811 
3812 	if (vcpu->halt_poll_ns > max_halt_poll_ns)
3813 		vcpu->halt_poll_ns = max_halt_poll_ns;
3814 
3815 	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3816 
3817 	start = cur = poll_end = ktime_get();
3818 	if (do_halt_poll) {
3819 		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3820 
3821 		do {
3822 			if (kvm_vcpu_check_block(vcpu) < 0)
3823 				goto out;
3824 			cpu_relax();
3825 			poll_end = cur = ktime_get();
3826 		} while (kvm_vcpu_can_poll(cur, stop));
3827 	}
3828 
3829 	waited = kvm_vcpu_block(vcpu);
3830 
3831 	cur = ktime_get();
3832 	if (waited) {
3833 		vcpu->stat.generic.halt_wait_ns +=
3834 			ktime_to_ns(cur) - ktime_to_ns(poll_end);
3835 		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3836 				ktime_to_ns(cur) - ktime_to_ns(poll_end));
3837 	}
3838 out:
3839 	/* The total time the vCPU was "halted", including polling time. */
3840 	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3841 
3842 	/*
3843 	 * Note, halt-polling is considered successful so long as the vCPU was
3844 	 * never actually scheduled out, i.e. even if the wake event arrived
3845 	 * after the halt-polling loop itself ended, but before the full wait.
3846 	 */
3847 	if (do_halt_poll)
3848 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
3849 
3850 	if (halt_poll_allowed) {
3851 		/* Recompute the max halt poll time in case it changed. */
3852 		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3853 
3854 		if (!vcpu_valid_wakeup(vcpu)) {
3855 			shrink_halt_poll_ns(vcpu);
3856 		} else if (max_halt_poll_ns) {
3857 			if (halt_ns <= vcpu->halt_poll_ns)
3858 				;
3859 			/* we had a long block, shrink polling */
3860 			else if (vcpu->halt_poll_ns &&
3861 				 halt_ns > max_halt_poll_ns)
3862 				shrink_halt_poll_ns(vcpu);
3863 			/* we had a short halt and our poll time is too small */
3864 			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3865 				 halt_ns < max_halt_poll_ns)
3866 				grow_halt_poll_ns(vcpu);
3867 		} else {
3868 			vcpu->halt_poll_ns = 0;
3869 		}
3870 	}
3871 
3872 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3873 }
3874 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3875 
3876 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3877 {
3878 	if (__kvm_vcpu_wake_up(vcpu)) {
3879 		WRITE_ONCE(vcpu->ready, true);
3880 		++vcpu->stat.generic.halt_wakeup;
3881 		return true;
3882 	}
3883 
3884 	return false;
3885 }
3886 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3887 
3888 #ifndef CONFIG_S390
3889 /*
3890  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3891  */
3892 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3893 {
3894 	int me, cpu;
3895 
3896 	if (kvm_vcpu_wake_up(vcpu))
3897 		return;
3898 
3899 	me = get_cpu();
3900 	/*
3901 	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3902 	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3903 	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3904 	 * within the vCPU thread itself.
3905 	 */
3906 	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3907 		if (vcpu->mode == IN_GUEST_MODE)
3908 			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3909 		goto out;
3910 	}
3911 
3912 	/*
3913 	 * Note, the vCPU could get migrated to a different pCPU at any point
3914 	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
3915 	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3916 	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3917 	 * vCPU also requires it to leave IN_GUEST_MODE.
3918 	 */
3919 	if (kvm_arch_vcpu_should_kick(vcpu)) {
3920 		cpu = READ_ONCE(vcpu->cpu);
3921 		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3922 			smp_send_reschedule(cpu);
3923 	}
3924 out:
3925 	put_cpu();
3926 }
3927 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3928 #endif /* !CONFIG_S390 */
3929 
3930 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3931 {
3932 	struct pid *pid;
3933 	struct task_struct *task = NULL;
3934 	int ret = 0;
3935 
3936 	rcu_read_lock();
3937 	pid = rcu_dereference(target->pid);
3938 	if (pid)
3939 		task = get_pid_task(pid, PIDTYPE_PID);
3940 	rcu_read_unlock();
3941 	if (!task)
3942 		return ret;
3943 	ret = yield_to(task, 1);
3944 	put_task_struct(task);
3945 
3946 	return ret;
3947 }
3948 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3949 
3950 /*
3951  * Helper that checks whether a VCPU is eligible for directed yield.
3952  * The most eligible candidate to yield to is chosen by the following heuristics:
3953  *
3954  *  (a) A VCPU which has not recently taken a pause-loop exit or had cpu_relax()
3955  *  intercepted (i.e. a preempted lock holder), indicated by @in_spin_loop,
3956  *  which is set at the beginning and cleared at the end of the interception/PLE handler.
3957  *
3958  *  (b) A VCPU which took a pause-loop exit / cpu_relax() intercept but did not
3959  *  get a chance last time (it has most likely become eligible now, since we
3960  *  probably yielded to the lock holder in the last iteration).  This is done by
3961  *  toggling @dy_eligible each time a VCPU is checked for eligibility.
3962  *
3963  *  Yielding to a VCPU that recently took a pause-loop exit or cpu_relax() intercept,
3964  *  before yielding to the preempted lock holder, could result in selecting the
3965  *  wrong VCPU and in burning CPU time.  Giving priority to a potential lock holder
3966  *  improves lock progress.
3967  *
3968  *  Since the algorithm is based on heuristics, accessing another VCPU's data without
3969  *  locking does no harm.  It may merely result in trying to yield to the same VCPU,
3970  *  failing, and continuing with the next VCPU, and so on.
3971  */
3972 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3973 {
3974 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3975 	bool eligible;
3976 
3977 	eligible = !vcpu->spin_loop.in_spin_loop ||
3978 		    vcpu->spin_loop.dy_eligible;
3979 
3980 	if (vcpu->spin_loop.in_spin_loop)
3981 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3982 
3983 	return eligible;
3984 #else
3985 	return true;
3986 #endif
3987 }
3988 
3989 /*
3990  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3991  * a vcpu_load/vcpu_put pair.  However, for most architectures
3992  * kvm_arch_vcpu_runnable does not require vcpu_load.
3993  */
3994 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3995 {
3996 	return kvm_arch_vcpu_runnable(vcpu);
3997 }
3998 
3999 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
4000 {
4001 	if (kvm_arch_dy_runnable(vcpu))
4002 		return true;
4003 
4004 #ifdef CONFIG_KVM_ASYNC_PF
4005 	if (!list_empty_careful(&vcpu->async_pf.done))
4006 		return true;
4007 #endif
4008 
4009 	return false;
4010 }
4011 
4012 /*
4013  * By default, simply query the target vCPU's current mode when checking if a
4014  * vCPU was preempted in kernel mode.  All architectures except x86 (or more
4015  * specifically, except VMX) allow querying whether or not a vCPU is in kernel
4016  * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
4017  * directly for cross-vCPU checks is functionally correct and accurate.
4018  */
4019 bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
4020 {
4021 	return kvm_arch_vcpu_in_kernel(vcpu);
4022 }
4023 
4024 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
4025 {
4026 	return false;
4027 }
4028 
4029 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
4030 {
4031 	struct kvm *kvm = me->kvm;
4032 	struct kvm_vcpu *vcpu;
4033 	int last_boosted_vcpu;
4034 	unsigned long i;
4035 	int yielded = 0;
4036 	int try = 3;
4037 	int pass;
4038 
4039 	last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
4040 	kvm_vcpu_set_in_spin_loop(me, true);
4041 	/*
4042 	 * We boost the priority of a VCPU that is runnable but not
4043 	 * currently running, because it got preempted by something
4044 	 * else and called schedule in __vcpu_run.  Hopefully that
4045 	 * VCPU is holding the lock that we need and will release it.
4046 	 * We approximate round-robin by starting at the last boosted VCPU.
4047 	 */
4048 	for (pass = 0; pass < 2 && !yielded && try; pass++) {
4049 		kvm_for_each_vcpu(i, vcpu, kvm) {
4050 			if (!pass && i <= last_boosted_vcpu) {
4051 				i = last_boosted_vcpu;
4052 				continue;
4053 			} else if (pass && i > last_boosted_vcpu)
4054 				break;
4055 			if (!READ_ONCE(vcpu->ready))
4056 				continue;
4057 			if (vcpu == me)
4058 				continue;
4059 			if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
4060 				continue;
4061 
4062 			/*
4063 			 * Treat the target vCPU as being in-kernel if it has a
4064 			 * pending interrupt, as the vCPU trying to yield may
4065 			 * be spinning waiting on IPI delivery, i.e. the target
4066 			 * vCPU is in-kernel for the purposes of directed yield.
4067 			 */
4068 			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
4069 			    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
4070 			    !kvm_arch_vcpu_preempted_in_kernel(vcpu))
4071 				continue;
4072 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
4073 				continue;
4074 
4075 			yielded = kvm_vcpu_yield_to(vcpu);
4076 			if (yielded > 0) {
4077 				WRITE_ONCE(kvm->last_boosted_vcpu, i);
4078 				break;
4079 			} else if (yielded < 0) {
4080 				try--;
4081 				if (!try)
4082 					break;
4083 			}
4084 		}
4085 	}
4086 	kvm_vcpu_set_in_spin_loop(me, false);
4087 
4088 	/* Ensure vcpu is not eligible during next spinloop */
4089 	kvm_vcpu_set_dy_eligible(me, false);
4090 }
4091 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
4092 
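/*
 * Returns true if @pgoff falls within the mmap window reserved for the vCPU's
 * dirty ring, i.e. [KVM_DIRTY_LOG_PAGE_OFFSET, offset + ring size in pages).
 */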
4093 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
4094 {
4095 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4096 	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
4097 	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
4098 	     kvm->dirty_ring_size / PAGE_SIZE);
4099 #else
4100 	return false;
4101 #endif
4102 }
4103 
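/*
 * Fault handler for mmap()s of a vCPU fd: hands out the kvm_run page, the x86
 * PIO data page, the coalesced MMIO ring, or a dirty ring page depending on
 * the page offset, deferring anything else to the architecture.
 */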
4104 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
4105 {
4106 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
4107 	struct page *page;
4108 
4109 	if (vmf->pgoff == 0)
4110 		page = virt_to_page(vcpu->run);
4111 #ifdef CONFIG_X86
4112 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
4113 		page = virt_to_page(vcpu->arch.pio_data);
4114 #endif
4115 #ifdef CONFIG_KVM_MMIO
4116 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
4117 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
4118 #endif
4119 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
4120 		page = kvm_dirty_ring_get_page(
4121 		    &vcpu->dirty_ring,
4122 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
4123 	else
4124 		return kvm_arch_vcpu_fault(vcpu, vmf);
4125 	get_page(page);
4126 	vmf->page = page;
4127 	return 0;
4128 }
4129 
4130 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
4131 	.fault = kvm_vcpu_fault,
4132 };
4133 
4134 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
4135 {
4136 	struct kvm_vcpu *vcpu = file->private_data;
4137 	unsigned long pages = vma_pages(vma);
4138 
4139 	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
4140 	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
4141 	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
4142 		return -EINVAL;
4143 
4144 	vma->vm_ops = &kvm_vcpu_vm_ops;
4145 	return 0;
4146 }
4147 
4148 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
4149 {
4150 	struct kvm_vcpu *vcpu = filp->private_data;
4151 
4152 	kvm_put_kvm(vcpu->kvm);
4153 	return 0;
4154 }
4155 
4156 static struct file_operations kvm_vcpu_fops = {
4157 	.release        = kvm_vcpu_release,
4158 	.unlocked_ioctl = kvm_vcpu_ioctl,
4159 	.mmap           = kvm_vcpu_mmap,
4160 	.llseek		= noop_llseek,
4161 	KVM_COMPAT(kvm_vcpu_compat_ioctl),
4162 };
4163 
4164 /*
4165  * Allocates an anon inode file descriptor for the vcpu.
4166  */
4167 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
4168 {
4169 	char name[8 + 1 + ITOA_MAX_LEN + 1];
4170 
4171 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
4172 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
4173 }
4174 
4175 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
4176 static int vcpu_get_pid(void *data, u64 *val)
4177 {
4178 	struct kvm_vcpu *vcpu = data;
4179 
4180 	rcu_read_lock();
4181 	*val = pid_nr(rcu_dereference(vcpu->pid));
4182 	rcu_read_unlock();
4183 	return 0;
4184 }
4185 
4186 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
4187 
4188 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
4189 {
4190 	struct dentry *debugfs_dentry;
4191 	char dir_name[ITOA_MAX_LEN * 2];
4192 
4193 	if (!debugfs_initialized())
4194 		return;
4195 
4196 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
4197 	debugfs_dentry = debugfs_create_dir(dir_name,
4198 					    vcpu->kvm->debugfs_dentry);
4199 	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
4200 			    &vcpu_get_pid_fops);
4201 
4202 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
4203 }
4204 #endif
4205 
4206 /*
4207  * Creates some virtual cpus.  Good luck creating more than one.
4208  */
4209 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
4210 {
4211 	int r;
4212 	struct kvm_vcpu *vcpu;
4213 	struct page *page;
4214 
4215 	/*
4216 	 * KVM tracks vCPU IDs as 'int', be kind to userspace and reject
4217 	 * too-large values instead of silently truncating.
4218 	 *
4219 	 * Ensure KVM_MAX_VCPU_IDS isn't pushed above INT_MAX without first
4220 	 * changing the storage type (at the very least, IDs should be tracked
4221 	 * as unsigned ints).
4222 	 */
4223 	BUILD_BUG_ON(KVM_MAX_VCPU_IDS > INT_MAX);
4224 	if (id >= KVM_MAX_VCPU_IDS)
4225 		return -EINVAL;
4226 
4227 	mutex_lock(&kvm->lock);
4228 	if (kvm->created_vcpus >= kvm->max_vcpus) {
4229 		mutex_unlock(&kvm->lock);
4230 		return -EINVAL;
4231 	}
4232 
4233 	r = kvm_arch_vcpu_precreate(kvm, id);
4234 	if (r) {
4235 		mutex_unlock(&kvm->lock);
4236 		return r;
4237 	}
4238 
4239 	kvm->created_vcpus++;
4240 	mutex_unlock(&kvm->lock);
4241 
4242 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
4243 	if (!vcpu) {
4244 		r = -ENOMEM;
4245 		goto vcpu_decrement;
4246 	}
4247 
4248 	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
4249 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4250 	if (!page) {
4251 		r = -ENOMEM;
4252 		goto vcpu_free;
4253 	}
4254 	vcpu->run = page_address(page);
4255 
4256 	kvm_vcpu_init(vcpu, kvm, id);
4257 
4258 	r = kvm_arch_vcpu_create(vcpu);
4259 	if (r)
4260 		goto vcpu_free_run_page;
4261 
4262 	if (kvm->dirty_ring_size) {
4263 		r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
4264 					 id, kvm->dirty_ring_size);
4265 		if (r)
4266 			goto arch_vcpu_destroy;
4267 	}
4268 
4269 	mutex_lock(&kvm->lock);
4270 
4271 #ifdef CONFIG_LOCKDEP
4272 	/* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
4273 	mutex_lock(&vcpu->mutex);
4274 	mutex_unlock(&vcpu->mutex);
4275 #endif
4276 
4277 	if (kvm_get_vcpu_by_id(kvm, id)) {
4278 		r = -EEXIST;
4279 		goto unlock_vcpu_destroy;
4280 	}
4281 
4282 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
4283 	r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4284 	if (r)
4285 		goto unlock_vcpu_destroy;
4286 
4287 	/* Now it's all set up, let userspace reach it */
4288 	kvm_get_kvm(kvm);
4289 	r = create_vcpu_fd(vcpu);
4290 	if (r < 0)
4291 		goto kvm_put_xa_release;
4292 
4293 	if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4294 		r = -EINVAL;
4295 		goto kvm_put_xa_release;
4296 	}
4297 
4298 	/*
4299 	 * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
4300 	 * pointer before the incremented value of kvm->online_vcpus.
4301 	 */
4302 	smp_wmb();
4303 	atomic_inc(&kvm->online_vcpus);
4304 
4305 	mutex_unlock(&kvm->lock);
4306 	kvm_arch_vcpu_postcreate(vcpu);
4307 	kvm_create_vcpu_debugfs(vcpu);
4308 	return r;
4309 
4310 kvm_put_xa_release:
4311 	kvm_put_kvm_no_destroy(kvm);
4312 	xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
4313 unlock_vcpu_destroy:
4314 	mutex_unlock(&kvm->lock);
4315 	kvm_dirty_ring_free(&vcpu->dirty_ring);
4316 arch_vcpu_destroy:
4317 	kvm_arch_vcpu_destroy(vcpu);
4318 vcpu_free_run_page:
4319 	free_page((unsigned long)vcpu->run);
4320 vcpu_free:
4321 	kmem_cache_free(kvm_vcpu_cache, vcpu);
4322 vcpu_decrement:
4323 	mutex_lock(&kvm->lock);
4324 	kvm->created_vcpus--;
4325 	mutex_unlock(&kvm->lock);
4326 	return r;
4327 }
4328 
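/*
 * Install (or clear, if @sigset is NULL) the signal mask applied while the
 * vCPU runs; SIGKILL and SIGSTOP are always removed from the mask.
 */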
4329 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4330 {
4331 	if (sigset) {
4332 		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4333 		vcpu->sigset_active = 1;
4334 		vcpu->sigset = *sigset;
4335 	} else
4336 		vcpu->sigset_active = 0;
4337 	return 0;
4338 }
4339 
4340 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4341 			      size_t size, loff_t *offset)
4342 {
4343 	struct kvm_vcpu *vcpu = file->private_data;
4344 
4345 	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4346 			&kvm_vcpu_stats_desc[0], &vcpu->stat,
4347 			sizeof(vcpu->stat), user_buffer, size, offset);
4348 }
4349 
4350 static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4351 {
4352 	struct kvm_vcpu *vcpu = file->private_data;
4353 
4354 	kvm_put_kvm(vcpu->kvm);
4355 	return 0;
4356 }
4357 
4358 static const struct file_operations kvm_vcpu_stats_fops = {
4359 	.owner = THIS_MODULE,
4360 	.read = kvm_vcpu_stats_read,
4361 	.release = kvm_vcpu_stats_release,
4362 	.llseek = noop_llseek,
4363 };
4364 
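/*
 * Create a read-only anonymous inode file descriptor through which userspace
 * can read the vCPU's binary stats; holds a reference on the VM until the fd
 * is released.
 */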
4365 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
4366 {
4367 	int fd;
4368 	struct file *file;
4369 	char name[15 + ITOA_MAX_LEN + 1];
4370 
4371 	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4372 
4373 	fd = get_unused_fd_flags(O_CLOEXEC);
4374 	if (fd < 0)
4375 		return fd;
4376 
4377 	file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4378 	if (IS_ERR(file)) {
4379 		put_unused_fd(fd);
4380 		return PTR_ERR(file);
4381 	}
4382 
4383 	kvm_get_kvm(vcpu->kvm);
4384 
4385 	file->f_mode |= FMODE_PREAD;
4386 	fd_install(fd, file);
4387 
4388 	return fd;
4389 }
4390 
4391 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
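/*
 * Pre-fault the GPA range described by @range on behalf of the vCPU, calling
 * the architecture helper in a loop until the whole range is mapped, a signal
 * is pending, or an error occurs.  Partial progress is reported back through
 * @range and treated as success.
 */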
4392 static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
4393 				     struct kvm_pre_fault_memory *range)
4394 {
4395 	int idx;
4396 	long r;
4397 	u64 full_size;
4398 
4399 	if (range->flags)
4400 		return -EINVAL;
4401 
4402 	if (!PAGE_ALIGNED(range->gpa) ||
4403 	    !PAGE_ALIGNED(range->size) ||
4404 	    range->gpa + range->size <= range->gpa)
4405 		return -EINVAL;
4406 
4407 	vcpu_load(vcpu);
4408 	idx = srcu_read_lock(&vcpu->kvm->srcu);
4409 
4410 	full_size = range->size;
4411 	do {
4412 		if (signal_pending(current)) {
4413 			r = -EINTR;
4414 			break;
4415 		}
4416 
4417 		r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
4418 		if (WARN_ON_ONCE(r == 0 || r == -EIO))
4419 			break;
4420 
4421 		if (r < 0)
4422 			break;
4423 
4424 		range->size -= r;
4425 		range->gpa += r;
4426 		cond_resched();
4427 	} while (range->size);
4428 
4429 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
4430 	vcpu_put(vcpu);
4431 
4432 	/* Return success if at least one page was mapped successfully.  */
4433 	return full_size == range->size ? r : 0;
4434 }
4435 #endif
4436 
4437 static long kvm_vcpu_ioctl(struct file *filp,
4438 			   unsigned int ioctl, unsigned long arg)
4439 {
4440 	struct kvm_vcpu *vcpu = filp->private_data;
4441 	void __user *argp = (void __user *)arg;
4442 	int r;
4443 	struct kvm_fpu *fpu = NULL;
4444 	struct kvm_sregs *kvm_sregs = NULL;
4445 
4446 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4447 		return -EIO;
4448 
4449 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4450 		return -EINVAL;
4451 
4452 	/*
4453 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
4454 	 * execution; mutex_lock() would break them.
4455 	 */
4456 	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4457 	if (r != -ENOIOCTLCMD)
4458 		return r;
4459 
4460 	if (mutex_lock_killable(&vcpu->mutex))
4461 		return -EINTR;
4462 	switch (ioctl) {
4463 	case KVM_RUN: {
4464 		struct pid *oldpid;
4465 		r = -EINVAL;
4466 		if (arg)
4467 			goto out;
4468 		oldpid = rcu_access_pointer(vcpu->pid);
4469 		if (unlikely(oldpid != task_pid(current))) {
4470 			/* The thread running this VCPU changed. */
4471 			struct pid *newpid;
4472 
4473 			r = kvm_arch_vcpu_run_pid_change(vcpu);
4474 			if (r)
4475 				break;
4476 
4477 			newpid = get_task_pid(current, PIDTYPE_PID);
4478 			rcu_assign_pointer(vcpu->pid, newpid);
4479 			if (oldpid)
4480 				synchronize_rcu();
4481 			put_pid(oldpid);
4482 		}
4483 		vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit__unsafe);
4484 		r = kvm_arch_vcpu_ioctl_run(vcpu);
4485 		vcpu->wants_to_run = false;
4486 
4487 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4488 		break;
4489 	}
4490 	case KVM_GET_REGS: {
4491 		struct kvm_regs *kvm_regs;
4492 
4493 		r = -ENOMEM;
4494 		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
4495 		if (!kvm_regs)
4496 			goto out;
4497 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4498 		if (r)
4499 			goto out_free1;
4500 		r = -EFAULT;
4501 		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4502 			goto out_free1;
4503 		r = 0;
4504 out_free1:
4505 		kfree(kvm_regs);
4506 		break;
4507 	}
4508 	case KVM_SET_REGS: {
4509 		struct kvm_regs *kvm_regs;
4510 
4511 		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4512 		if (IS_ERR(kvm_regs)) {
4513 			r = PTR_ERR(kvm_regs);
4514 			goto out;
4515 		}
4516 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4517 		kfree(kvm_regs);
4518 		break;
4519 	}
4520 	case KVM_GET_SREGS: {
4521 		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
4522 		r = -ENOMEM;
4523 		if (!kvm_sregs)
4524 			goto out;
4525 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4526 		if (r)
4527 			goto out;
4528 		r = -EFAULT;
4529 		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4530 			goto out;
4531 		r = 0;
4532 		break;
4533 	}
4534 	case KVM_SET_SREGS: {
4535 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4536 		if (IS_ERR(kvm_sregs)) {
4537 			r = PTR_ERR(kvm_sregs);
4538 			kvm_sregs = NULL;
4539 			goto out;
4540 		}
4541 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4542 		break;
4543 	}
4544 	case KVM_GET_MP_STATE: {
4545 		struct kvm_mp_state mp_state;
4546 
4547 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4548 		if (r)
4549 			goto out;
4550 		r = -EFAULT;
4551 		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4552 			goto out;
4553 		r = 0;
4554 		break;
4555 	}
4556 	case KVM_SET_MP_STATE: {
4557 		struct kvm_mp_state mp_state;
4558 
4559 		r = -EFAULT;
4560 		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4561 			goto out;
4562 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4563 		break;
4564 	}
4565 	case KVM_TRANSLATE: {
4566 		struct kvm_translation tr;
4567 
4568 		r = -EFAULT;
4569 		if (copy_from_user(&tr, argp, sizeof(tr)))
4570 			goto out;
4571 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4572 		if (r)
4573 			goto out;
4574 		r = -EFAULT;
4575 		if (copy_to_user(argp, &tr, sizeof(tr)))
4576 			goto out;
4577 		r = 0;
4578 		break;
4579 	}
4580 	case KVM_SET_GUEST_DEBUG: {
4581 		struct kvm_guest_debug dbg;
4582 
4583 		r = -EFAULT;
4584 		if (copy_from_user(&dbg, argp, sizeof(dbg)))
4585 			goto out;
4586 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4587 		break;
4588 	}
4589 	case KVM_SET_SIGNAL_MASK: {
4590 		struct kvm_signal_mask __user *sigmask_arg = argp;
4591 		struct kvm_signal_mask kvm_sigmask;
4592 		sigset_t sigset, *p;
4593 
4594 		p = NULL;
4595 		if (argp) {
4596 			r = -EFAULT;
4597 			if (copy_from_user(&kvm_sigmask, argp,
4598 					   sizeof(kvm_sigmask)))
4599 				goto out;
4600 			r = -EINVAL;
4601 			if (kvm_sigmask.len != sizeof(sigset))
4602 				goto out;
4603 			r = -EFAULT;
4604 			if (copy_from_user(&sigset, sigmask_arg->sigset,
4605 					   sizeof(sigset)))
4606 				goto out;
4607 			p = &sigset;
4608 		}
4609 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4610 		break;
4611 	}
4612 	case KVM_GET_FPU: {
4613 		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
4614 		r = -ENOMEM;
4615 		if (!fpu)
4616 			goto out;
4617 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4618 		if (r)
4619 			goto out;
4620 		r = -EFAULT;
4621 		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4622 			goto out;
4623 		r = 0;
4624 		break;
4625 	}
4626 	case KVM_SET_FPU: {
4627 		fpu = memdup_user(argp, sizeof(*fpu));
4628 		if (IS_ERR(fpu)) {
4629 			r = PTR_ERR(fpu);
4630 			fpu = NULL;
4631 			goto out;
4632 		}
4633 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4634 		break;
4635 	}
4636 	case KVM_GET_STATS_FD: {
4637 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4638 		break;
4639 	}
4640 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
4641 	case KVM_PRE_FAULT_MEMORY: {
4642 		struct kvm_pre_fault_memory range;
4643 
4644 		r = -EFAULT;
4645 		if (copy_from_user(&range, argp, sizeof(range)))
4646 			break;
4647 		r = kvm_vcpu_pre_fault_memory(vcpu, &range);
4648 		/* Pass back leftover range. */
4649 		if (copy_to_user(argp, &range, sizeof(range)))
4650 			r = -EFAULT;
4651 		break;
4652 	}
4653 #endif
4654 	default:
4655 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4656 	}
4657 out:
4658 	mutex_unlock(&vcpu->mutex);
4659 	kfree(fpu);
4660 	kfree(kvm_sregs);
4661 	return r;
4662 }
4663 
4664 #ifdef CONFIG_KVM_COMPAT
4665 static long kvm_vcpu_compat_ioctl(struct file *filp,
4666 				  unsigned int ioctl, unsigned long arg)
4667 {
4668 	struct kvm_vcpu *vcpu = filp->private_data;
4669 	void __user *argp = compat_ptr(arg);
4670 	int r;
4671 
4672 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4673 		return -EIO;
4674 
4675 	switch (ioctl) {
4676 	case KVM_SET_SIGNAL_MASK: {
4677 		struct kvm_signal_mask __user *sigmask_arg = argp;
4678 		struct kvm_signal_mask kvm_sigmask;
4679 		sigset_t sigset;
4680 
4681 		if (argp) {
4682 			r = -EFAULT;
4683 			if (copy_from_user(&kvm_sigmask, argp,
4684 					   sizeof(kvm_sigmask)))
4685 				goto out;
4686 			r = -EINVAL;
4687 			if (kvm_sigmask.len != sizeof(compat_sigset_t))
4688 				goto out;
4689 			r = -EFAULT;
4690 			if (get_compat_sigset(&sigset,
4691 					      (compat_sigset_t __user *)sigmask_arg->sigset))
4692 				goto out;
4693 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4694 		} else
4695 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4696 		break;
4697 	}
4698 	default:
4699 		r = kvm_vcpu_ioctl(filp, ioctl, arg);
4700 	}
4701 
4702 out:
4703 	return r;
4704 }
4705 #endif
4706 
4707 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4708 {
4709 	struct kvm_device *dev = filp->private_data;
4710 
4711 	if (dev->ops->mmap)
4712 		return dev->ops->mmap(dev, vma);
4713 
4714 	return -ENODEV;
4715 }
4716 
4717 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4718 				 int (*accessor)(struct kvm_device *dev,
4719 						 struct kvm_device_attr *attr),
4720 				 unsigned long arg)
4721 {
4722 	struct kvm_device_attr attr;
4723 
4724 	if (!accessor)
4725 		return -EPERM;
4726 
4727 	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4728 		return -EFAULT;
4729 
4730 	return accessor(dev, &attr);
4731 }
4732 
4733 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4734 			     unsigned long arg)
4735 {
4736 	struct kvm_device *dev = filp->private_data;
4737 
4738 	if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4739 		return -EIO;
4740 
4741 	switch (ioctl) {
4742 	case KVM_SET_DEVICE_ATTR:
4743 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4744 	case KVM_GET_DEVICE_ATTR:
4745 		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4746 	case KVM_HAS_DEVICE_ATTR:
4747 		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4748 	default:
4749 		if (dev->ops->ioctl)
4750 			return dev->ops->ioctl(dev, ioctl, arg);
4751 
4752 		return -ENOTTY;
4753 	}
4754 }
4755 
4756 static int kvm_device_release(struct inode *inode, struct file *filp)
4757 {
4758 	struct kvm_device *dev = filp->private_data;
4759 	struct kvm *kvm = dev->kvm;
4760 
4761 	if (dev->ops->release) {
4762 		mutex_lock(&kvm->lock);
4763 		list_del_rcu(&dev->vm_node);
4764 		synchronize_rcu();
4765 		dev->ops->release(dev);
4766 		mutex_unlock(&kvm->lock);
4767 	}
4768 
4769 	kvm_put_kvm(kvm);
4770 	return 0;
4771 }
4772 
4773 static struct file_operations kvm_device_fops = {
4774 	.unlocked_ioctl = kvm_device_ioctl,
4775 	.release = kvm_device_release,
4776 	KVM_COMPAT(kvm_device_ioctl),
4777 	.mmap = kvm_device_mmap,
4778 };
4779 
4780 struct kvm_device *kvm_device_from_filp(struct file *filp)
4781 {
4782 	if (filp->f_op != &kvm_device_fops)
4783 		return NULL;
4784 
4785 	return filp->private_data;
4786 }
4787 
4788 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4789 #ifdef CONFIG_KVM_MPIC
4790 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
4791 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
4792 #endif
4793 };
4794 
4795 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4796 {
4797 	if (type >= ARRAY_SIZE(kvm_device_ops_table))
4798 		return -ENOSPC;
4799 
4800 	if (kvm_device_ops_table[type] != NULL)
4801 		return -EEXIST;
4802 
4803 	kvm_device_ops_table[type] = ops;
4804 	return 0;
4805 }
4806 
4807 void kvm_unregister_device_ops(u32 type)
4808 {
4809 	if (kvm_device_ops_table[type] != NULL)
4810 		kvm_device_ops_table[type] = NULL;
4811 }
4812 
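/*
 * Handle KVM_CREATE_DEVICE: look up the device ops for the requested type
 * (returning early for KVM_CREATE_DEVICE_TEST), create the device under
 * kvm->lock, add it to the VM's device list, and hand userspace a file
 * descriptor for it, unwinding everything if installing the fd fails.
 */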
4813 static int kvm_ioctl_create_device(struct kvm *kvm,
4814 				   struct kvm_create_device *cd)
4815 {
4816 	const struct kvm_device_ops *ops;
4817 	struct kvm_device *dev;
4818 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4819 	int type;
4820 	int ret;
4821 
4822 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4823 		return -ENODEV;
4824 
4825 	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4826 	ops = kvm_device_ops_table[type];
4827 	if (ops == NULL)
4828 		return -ENODEV;
4829 
4830 	if (test)
4831 		return 0;
4832 
4833 	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4834 	if (!dev)
4835 		return -ENOMEM;
4836 
4837 	dev->ops = ops;
4838 	dev->kvm = kvm;
4839 
4840 	mutex_lock(&kvm->lock);
4841 	ret = ops->create(dev, type);
4842 	if (ret < 0) {
4843 		mutex_unlock(&kvm->lock);
4844 		kfree(dev);
4845 		return ret;
4846 	}
4847 	list_add_rcu(&dev->vm_node, &kvm->devices);
4848 	mutex_unlock(&kvm->lock);
4849 
4850 	if (ops->init)
4851 		ops->init(dev);
4852 
4853 	kvm_get_kvm(kvm);
4854 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4855 	if (ret < 0) {
4856 		kvm_put_kvm_no_destroy(kvm);
4857 		mutex_lock(&kvm->lock);
4858 		list_del_rcu(&dev->vm_node);
4859 		synchronize_rcu();
4860 		if (ops->release)
4861 			ops->release(dev);
4862 		mutex_unlock(&kvm->lock);
4863 		if (ops->destroy)
4864 			ops->destroy(dev);
4865 		return ret;
4866 	}
4867 
4868 	cd->fd = ret;
4869 	return 0;
4870 }
4871 
4872 static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4873 {
4874 	switch (arg) {
4875 	case KVM_CAP_USER_MEMORY:
4876 	case KVM_CAP_USER_MEMORY2:
4877 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4878 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4879 	case KVM_CAP_INTERNAL_ERROR_DATA:
4880 #ifdef CONFIG_HAVE_KVM_MSI
4881 	case KVM_CAP_SIGNAL_MSI:
4882 #endif
4883 #ifdef CONFIG_HAVE_KVM_IRQCHIP
4884 	case KVM_CAP_IRQFD:
4885 #endif
4886 	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4887 	case KVM_CAP_CHECK_EXTENSION_VM:
4888 	case KVM_CAP_ENABLE_CAP_VM:
4889 	case KVM_CAP_HALT_POLL:
4890 		return 1;
4891 #ifdef CONFIG_KVM_MMIO
4892 	case KVM_CAP_COALESCED_MMIO:
4893 		return KVM_COALESCED_MMIO_PAGE_OFFSET;
4894 	case KVM_CAP_COALESCED_PIO:
4895 		return 1;
4896 #endif
4897 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4898 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4899 		return KVM_DIRTY_LOG_MANUAL_CAPS;
4900 #endif
4901 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4902 	case KVM_CAP_IRQ_ROUTING:
4903 		return KVM_MAX_IRQ_ROUTES;
4904 #endif
4905 #if KVM_MAX_NR_ADDRESS_SPACES > 1
4906 	case KVM_CAP_MULTI_ADDRESS_SPACE:
4907 		if (kvm)
4908 			return kvm_arch_nr_memslot_as_ids(kvm);
4909 		return KVM_MAX_NR_ADDRESS_SPACES;
4910 #endif
4911 	case KVM_CAP_NR_MEMSLOTS:
4912 		return KVM_USER_MEM_SLOTS;
4913 	case KVM_CAP_DIRTY_LOG_RING:
4914 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4915 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4916 #else
4917 		return 0;
4918 #endif
4919 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4920 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4921 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4922 #else
4923 		return 0;
4924 #endif
4925 #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4926 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4927 #endif
4928 	case KVM_CAP_BINARY_STATS_FD:
4929 	case KVM_CAP_SYSTEM_EVENT_DATA:
4930 	case KVM_CAP_DEVICE_CTRL:
4931 		return 1;
4932 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
4933 	case KVM_CAP_MEMORY_ATTRIBUTES:
4934 		return kvm_supported_mem_attributes(kvm);
4935 #endif
4936 #ifdef CONFIG_KVM_PRIVATE_MEM
4937 	case KVM_CAP_GUEST_MEMFD:
4938 		return !kvm || kvm_arch_has_private_mem(kvm);
4939 #endif
4940 	default:
4941 		break;
4942 	}
4943 	return kvm_vm_ioctl_check_extension(kvm, arg);
4944 }
4945 
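/*
 * Enable the per-vCPU dirty ring for this VM.  The size, in bytes, must be a
 * power of two, at least one page, large enough for the reserved entries, and
 * no larger than KVM_DIRTY_RING_MAX_ENTRIES allows; it can be set only once
 * and only before any vCPU has been created.
 */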
4946 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4947 {
4948 	int r;
4949 
4950 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4951 		return -EINVAL;
4952 
4953 	/* The size must be a non-zero power of 2 */
4954 	if (!size || (size & (size - 1)))
4955 		return -EINVAL;
4956 
4957 	/* Must be large enough for the reserved entries and at least one page */
4958 	if (size < kvm_dirty_ring_get_rsvd_entries() *
4959 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4960 		return -EINVAL;
4961 
4962 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4963 	    sizeof(struct kvm_dirty_gfn))
4964 		return -E2BIG;
4965 
4966 	/* The ring size can only be set once */
4967 	if (kvm->dirty_ring_size)
4968 		return -EINVAL;
4969 
4970 	mutex_lock(&kvm->lock);
4971 
4972 	if (kvm->created_vcpus) {
4973 		/* Changing this value is not allowed once vCPUs have been created */
4974 		r = -EINVAL;
4975 	} else {
4976 		kvm->dirty_ring_size = size;
4977 		r = 0;
4978 	}
4979 
4980 	mutex_unlock(&kvm->lock);
4981 	return r;
4982 }
4983 
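/*
 * Reset the dirty rings of all vCPUs, i.e. re-arm entries that userspace has
 * already harvested, and flush remote TLBs if any entries were reset.
 * Returns the number of entries reset, or -EINVAL if dirty rings are not
 * enabled.
 */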
4984 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4985 {
4986 	unsigned long i;
4987 	struct kvm_vcpu *vcpu;
4988 	int cleared = 0;
4989 
4990 	if (!kvm->dirty_ring_size)
4991 		return -EINVAL;
4992 
4993 	mutex_lock(&kvm->slots_lock);
4994 
4995 	kvm_for_each_vcpu(i, vcpu, kvm)
4996 		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4997 
4998 	mutex_unlock(&kvm->slots_lock);
4999 
5000 	if (cleared)
5001 		kvm_flush_remote_tlbs(kvm);
5002 
5003 	return cleared;
5004 }
5005 
5006 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
5007 						  struct kvm_enable_cap *cap)
5008 {
5009 	return -EINVAL;
5010 }
5011 
5012 bool kvm_are_all_memslots_empty(struct kvm *kvm)
5013 {
5014 	int i;
5015 
5016 	lockdep_assert_held(&kvm->slots_lock);
5017 
5018 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
5019 		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
5020 			return false;
5021 	}
5022 
5023 	return true;
5024 }
5025 EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
5026 
5027 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
5028 					   struct kvm_enable_cap *cap)
5029 {
5030 	switch (cap->cap) {
5031 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5032 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
5033 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
5034 
5035 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
5036 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
5037 
5038 		if (cap->flags || (cap->args[0] & ~allowed_options))
5039 			return -EINVAL;
5040 		kvm->manual_dirty_log_protect = cap->args[0];
5041 		return 0;
5042 	}
5043 #endif
5044 	case KVM_CAP_HALT_POLL: {
5045 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
5046 			return -EINVAL;
5047 
5048 		kvm->max_halt_poll_ns = cap->args[0];
5049 
5050 		/*
5051 		 * Ensure kvm->override_halt_poll_ns does not become visible
5052 		 * before kvm->max_halt_poll_ns.
5053 		 *
5054 		 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
5055 		 */
5056 		smp_wmb();
5057 		kvm->override_halt_poll_ns = true;
5058 
5059 		return 0;
5060 	}
5061 	case KVM_CAP_DIRTY_LOG_RING:
5062 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
5063 		if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
5064 			return -EINVAL;
5065 
5066 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
5067 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
5068 		int r = -EINVAL;
5069 
5070 		if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
5071 		    !kvm->dirty_ring_size || cap->flags)
5072 			return r;
5073 
5074 		mutex_lock(&kvm->slots_lock);
5075 
5076 		/*
5077 		 * For simplicity, allow enabling ring+bitmap if and only if
5078 		 * there are no memslots, e.g. to ensure all memslots allocate
5079 		 * a bitmap after the capability is enabled.
5080 		 */
5081 		if (kvm_are_all_memslots_empty(kvm)) {
5082 			kvm->dirty_ring_with_bitmap = true;
5083 			r = 0;
5084 		}
5085 
5086 		mutex_unlock(&kvm->slots_lock);
5087 
5088 		return r;
5089 	}
5090 	default:
5091 		return kvm_vm_ioctl_enable_cap(kvm, cap);
5092 	}
5093 }
5094 
5095 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
5096 			      size_t size, loff_t *offset)
5097 {
5098 	struct kvm *kvm = file->private_data;
5099 
5100 	return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
5101 				&kvm_vm_stats_desc[0], &kvm->stat,
5102 				sizeof(kvm->stat), user_buffer, size, offset);
5103 }
5104 
5105 static int kvm_vm_stats_release(struct inode *inode, struct file *file)
5106 {
5107 	struct kvm *kvm = file->private_data;
5108 
5109 	kvm_put_kvm(kvm);
5110 	return 0;
5111 }
5112 
5113 static const struct file_operations kvm_vm_stats_fops = {
5114 	.owner = THIS_MODULE,
5115 	.read = kvm_vm_stats_read,
5116 	.release = kvm_vm_stats_release,
5117 	.llseek = noop_llseek,
5118 };
5119 
5120 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
5121 {
5122 	int fd;
5123 	struct file *file;
5124 
5125 	fd = get_unused_fd_flags(O_CLOEXEC);
5126 	if (fd < 0)
5127 		return fd;
5128 
5129 	file = anon_inode_getfile("kvm-vm-stats",
5130 			&kvm_vm_stats_fops, kvm, O_RDONLY);
5131 	if (IS_ERR(file)) {
5132 		put_unused_fd(fd);
5133 		return PTR_ERR(file);
5134 	}
5135 
5136 	kvm_get_kvm(kvm);
5137 
5138 	file->f_mode |= FMODE_PREAD;
5139 	fd_install(fd, file);
5140 
5141 	return fd;
5142 }
5143 
5144 #define SANITY_CHECK_MEM_REGION_FIELD(field)					\
5145 do {										\
5146 	BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=		\
5147 		     offsetof(struct kvm_userspace_memory_region2, field));	\
5148 	BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) !=		\
5149 		     sizeof_field(struct kvm_userspace_memory_region2, field));	\
5150 } while (0)
5151 
5152 static long kvm_vm_ioctl(struct file *filp,
5153 			   unsigned int ioctl, unsigned long arg)
5154 {
5155 	struct kvm *kvm = filp->private_data;
5156 	void __user *argp = (void __user *)arg;
5157 	int r;
5158 
5159 	if (kvm->mm != current->mm || kvm->vm_dead)
5160 		return -EIO;
5161 	switch (ioctl) {
5162 	case KVM_CREATE_VCPU:
5163 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
5164 		break;
5165 	case KVM_ENABLE_CAP: {
5166 		struct kvm_enable_cap cap;
5167 
5168 		r = -EFAULT;
5169 		if (copy_from_user(&cap, argp, sizeof(cap)))
5170 			goto out;
5171 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
5172 		break;
5173 	}
5174 	case KVM_SET_USER_MEMORY_REGION2:
5175 	case KVM_SET_USER_MEMORY_REGION: {
5176 		struct kvm_userspace_memory_region2 mem;
5177 		unsigned long size;
5178 
5179 		if (ioctl == KVM_SET_USER_MEMORY_REGION) {
5180 			/*
5181 			 * Fields beyond struct kvm_userspace_memory_region shouldn't be
5182 			 * accessed, but avoid leaking kernel memory in case of a bug.
5183 			 */
5184 			memset(&mem, 0, sizeof(mem));
5185 			size = sizeof(struct kvm_userspace_memory_region);
5186 		} else {
5187 			size = sizeof(struct kvm_userspace_memory_region2);
5188 		}
5189 
5190 		/* Ensure the common parts of the two structs are identical. */
5191 		SANITY_CHECK_MEM_REGION_FIELD(slot);
5192 		SANITY_CHECK_MEM_REGION_FIELD(flags);
5193 		SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
5194 		SANITY_CHECK_MEM_REGION_FIELD(memory_size);
5195 		SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
5196 
5197 		r = -EFAULT;
5198 		if (copy_from_user(&mem, argp, size))
5199 			goto out;
5200 
5201 		r = -EINVAL;
5202 		if (ioctl == KVM_SET_USER_MEMORY_REGION &&
5203 		    (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
5204 			goto out;
5205 
5206 		r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
5207 		break;
5208 	}
5209 	case KVM_GET_DIRTY_LOG: {
5210 		struct kvm_dirty_log log;
5211 
5212 		r = -EFAULT;
5213 		if (copy_from_user(&log, argp, sizeof(log)))
5214 			goto out;
5215 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5216 		break;
5217 	}
5218 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5219 	case KVM_CLEAR_DIRTY_LOG: {
5220 		struct kvm_clear_dirty_log log;
5221 
5222 		r = -EFAULT;
5223 		if (copy_from_user(&log, argp, sizeof(log)))
5224 			goto out;
5225 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5226 		break;
5227 	}
5228 #endif
5229 #ifdef CONFIG_KVM_MMIO
5230 	case KVM_REGISTER_COALESCED_MMIO: {
5231 		struct kvm_coalesced_mmio_zone zone;
5232 
5233 		r = -EFAULT;
5234 		if (copy_from_user(&zone, argp, sizeof(zone)))
5235 			goto out;
5236 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5237 		break;
5238 	}
5239 	case KVM_UNREGISTER_COALESCED_MMIO: {
5240 		struct kvm_coalesced_mmio_zone zone;
5241 
5242 		r = -EFAULT;
5243 		if (copy_from_user(&zone, argp, sizeof(zone)))
5244 			goto out;
5245 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5246 		break;
5247 	}
5248 #endif
5249 	case KVM_IRQFD: {
5250 		struct kvm_irqfd data;
5251 
5252 		r = -EFAULT;
5253 		if (copy_from_user(&data, argp, sizeof(data)))
5254 			goto out;
5255 		r = kvm_irqfd(kvm, &data);
5256 		break;
5257 	}
5258 	case KVM_IOEVENTFD: {
5259 		struct kvm_ioeventfd data;
5260 
5261 		r = -EFAULT;
5262 		if (copy_from_user(&data, argp, sizeof(data)))
5263 			goto out;
5264 		r = kvm_ioeventfd(kvm, &data);
5265 		break;
5266 	}
5267 #ifdef CONFIG_HAVE_KVM_MSI
5268 	case KVM_SIGNAL_MSI: {
5269 		struct kvm_msi msi;
5270 
5271 		r = -EFAULT;
5272 		if (copy_from_user(&msi, argp, sizeof(msi)))
5273 			goto out;
5274 		r = kvm_send_userspace_msi(kvm, &msi);
5275 		break;
5276 	}
5277 #endif
5278 #ifdef __KVM_HAVE_IRQ_LINE
5279 	case KVM_IRQ_LINE_STATUS:
5280 	case KVM_IRQ_LINE: {
5281 		struct kvm_irq_level irq_event;
5282 
5283 		r = -EFAULT;
5284 		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
5285 			goto out;
5286 
5287 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
5288 					ioctl == KVM_IRQ_LINE_STATUS);
5289 		if (r)
5290 			goto out;
5291 
5292 		r = -EFAULT;
5293 		if (ioctl == KVM_IRQ_LINE_STATUS) {
5294 			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
5295 				goto out;
5296 		}
5297 
5298 		r = 0;
5299 		break;
5300 	}
5301 #endif
5302 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
5303 	case KVM_SET_GSI_ROUTING: {
5304 		struct kvm_irq_routing routing;
5305 		struct kvm_irq_routing __user *urouting;
5306 		struct kvm_irq_routing_entry *entries = NULL;
5307 
5308 		r = -EFAULT;
5309 		if (copy_from_user(&routing, argp, sizeof(routing)))
5310 			goto out;
5311 		r = -EINVAL;
5312 		if (!kvm_arch_can_set_irq_routing(kvm))
5313 			goto out;
5314 		if (routing.nr > KVM_MAX_IRQ_ROUTES)
5315 			goto out;
5316 		if (routing.flags)
5317 			goto out;
5318 		if (routing.nr) {
5319 			urouting = argp;
5320 			entries = vmemdup_array_user(urouting->entries,
5321 						     routing.nr, sizeof(*entries));
5322 			if (IS_ERR(entries)) {
5323 				r = PTR_ERR(entries);
5324 				goto out;
5325 			}
5326 		}
5327 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
5328 					routing.flags);
5329 		kvfree(entries);
5330 		break;
5331 	}
5332 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
5333 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
5334 	case KVM_SET_MEMORY_ATTRIBUTES: {
5335 		struct kvm_memory_attributes attrs;
5336 
5337 		r = -EFAULT;
5338 		if (copy_from_user(&attrs, argp, sizeof(attrs)))
5339 			goto out;
5340 
5341 		r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
5342 		break;
5343 	}
5344 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
5345 	case KVM_CREATE_DEVICE: {
5346 		struct kvm_create_device cd;
5347 
5348 		r = -EFAULT;
5349 		if (copy_from_user(&cd, argp, sizeof(cd)))
5350 			goto out;
5351 
5352 		r = kvm_ioctl_create_device(kvm, &cd);
5353 		if (r)
5354 			goto out;
5355 
5356 		r = -EFAULT;
5357 		if (copy_to_user(argp, &cd, sizeof(cd)))
5358 			goto out;
5359 
5360 		r = 0;
5361 		break;
5362 	}
5363 	case KVM_CHECK_EXTENSION:
5364 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
5365 		break;
5366 	case KVM_RESET_DIRTY_RINGS:
5367 		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
5368 		break;
5369 	case KVM_GET_STATS_FD:
5370 		r = kvm_vm_ioctl_get_stats_fd(kvm);
5371 		break;
5372 #ifdef CONFIG_KVM_PRIVATE_MEM
5373 	case KVM_CREATE_GUEST_MEMFD: {
5374 		struct kvm_create_guest_memfd guest_memfd;
5375 
5376 		r = -EFAULT;
5377 		if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
5378 			goto out;
5379 
5380 		r = kvm_gmem_create(kvm, &guest_memfd);
5381 		break;
5382 	}
5383 #endif
5384 	default:
5385 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
5386 	}
5387 out:
5388 	return r;
5389 }
5390 
5391 #ifdef CONFIG_KVM_COMPAT
5392 struct compat_kvm_dirty_log {
5393 	__u32 slot;
5394 	__u32 padding1;
5395 	union {
5396 		compat_uptr_t dirty_bitmap; /* one bit per page */
5397 		__u64 padding2;
5398 	};
5399 };
5400 
5401 struct compat_kvm_clear_dirty_log {
5402 	__u32 slot;
5403 	__u32 num_pages;
5404 	__u64 first_page;
5405 	union {
5406 		compat_uptr_t dirty_bitmap; /* one bit per page */
5407 		__u64 padding2;
5408 	};
5409 };
5410 
5411 long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
5412 				     unsigned long arg)
5413 {
5414 	return -ENOTTY;
5415 }
5416 
5417 static long kvm_vm_compat_ioctl(struct file *filp,
5418 			   unsigned int ioctl, unsigned long arg)
5419 {
5420 	struct kvm *kvm = filp->private_data;
5421 	int r;
5422 
5423 	if (kvm->mm != current->mm || kvm->vm_dead)
5424 		return -EIO;
5425 
5426 	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
5427 	if (r != -ENOTTY)
5428 		return r;
5429 
5430 	switch (ioctl) {
5431 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
5432 	case KVM_CLEAR_DIRTY_LOG: {
5433 		struct compat_kvm_clear_dirty_log compat_log;
5434 		struct kvm_clear_dirty_log log;
5435 
5436 		if (copy_from_user(&compat_log, (void __user *)arg,
5437 				   sizeof(compat_log)))
5438 			return -EFAULT;
5439 		log.slot	 = compat_log.slot;
5440 		log.num_pages	 = compat_log.num_pages;
5441 		log.first_page	 = compat_log.first_page;
5442 		log.padding2	 = compat_log.padding2;
5443 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5444 
5445 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
5446 		break;
5447 	}
5448 #endif
5449 	case KVM_GET_DIRTY_LOG: {
5450 		struct compat_kvm_dirty_log compat_log;
5451 		struct kvm_dirty_log log;
5452 
5453 		if (copy_from_user(&compat_log, (void __user *)arg,
5454 				   sizeof(compat_log)))
5455 			return -EFAULT;
5456 		log.slot	 = compat_log.slot;
5457 		log.padding1	 = compat_log.padding1;
5458 		log.padding2	 = compat_log.padding2;
5459 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
5460 
5461 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
5462 		break;
5463 	}
5464 	default:
5465 		r = kvm_vm_ioctl(filp, ioctl, arg);
5466 	}
5467 	return r;
5468 }
5469 #endif
5470 
5471 static struct file_operations kvm_vm_fops = {
5472 	.release        = kvm_vm_release,
5473 	.unlocked_ioctl = kvm_vm_ioctl,
5474 	.llseek		= noop_llseek,
5475 	KVM_COMPAT(kvm_vm_compat_ioctl),
5476 };
5477 
5478 bool file_is_kvm(struct file *file)
5479 {
5480 	return file && file->f_op == &kvm_vm_fops;
5481 }
5482 EXPORT_SYMBOL_GPL(file_is_kvm);
5483 
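/*
 * Handle KVM_CREATE_VM: reserve a file descriptor, create the VM, wrap it in
 * an anonymous inode file, send the create uevent, and install the fd.  On
 * failure any already-created VM is released via kvm_put_kvm().
 */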
5484 static int kvm_dev_ioctl_create_vm(unsigned long type)
5485 {
5486 	char fdname[ITOA_MAX_LEN + 1];
5487 	int r, fd;
5488 	struct kvm *kvm;
5489 	struct file *file;
5490 
5491 	fd = get_unused_fd_flags(O_CLOEXEC);
5492 	if (fd < 0)
5493 		return fd;
5494 
5495 	snprintf(fdname, sizeof(fdname), "%d", fd);
5496 
5497 	kvm = kvm_create_vm(type, fdname);
5498 	if (IS_ERR(kvm)) {
5499 		r = PTR_ERR(kvm);
5500 		goto put_fd;
5501 	}
5502 
5503 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5504 	if (IS_ERR(file)) {
5505 		r = PTR_ERR(file);
5506 		goto put_kvm;
5507 	}
5508 
5509 	/*
5510 	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
5511 	 * already set, with ->release() being kvm_vm_release().  In error
5512 	 * cases it will be called by the final fput(file) and will take
5513 	 * care of doing kvm_put_kvm(kvm).
5514 	 */
5515 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5516 
5517 	fd_install(fd, file);
5518 	return fd;
5519 
5520 put_kvm:
5521 	kvm_put_kvm(kvm);
5522 put_fd:
5523 	put_unused_fd(fd);
5524 	return r;
5525 }
5526 
5527 static long kvm_dev_ioctl(struct file *filp,
5528 			  unsigned int ioctl, unsigned long arg)
5529 {
5530 	int r = -EINVAL;
5531 
5532 	switch (ioctl) {
5533 	case KVM_GET_API_VERSION:
5534 		if (arg)
5535 			goto out;
5536 		r = KVM_API_VERSION;
5537 		break;
5538 	case KVM_CREATE_VM:
5539 		r = kvm_dev_ioctl_create_vm(arg);
5540 		break;
5541 	case KVM_CHECK_EXTENSION:
5542 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5543 		break;
5544 	case KVM_GET_VCPU_MMAP_SIZE:
5545 		if (arg)
5546 			goto out;
5547 		r = PAGE_SIZE;     /* struct kvm_run */
5548 #ifdef CONFIG_X86
5549 		r += PAGE_SIZE;    /* pio data page */
5550 #endif
5551 #ifdef CONFIG_KVM_MMIO
5552 		r += PAGE_SIZE;    /* coalesced mmio ring page */
5553 #endif
5554 		break;
5555 	default:
5556 		return kvm_arch_dev_ioctl(filp, ioctl, arg);
5557 	}
5558 out:
5559 	return r;
5560 }
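
/*
 * Note on KVM_GET_VCPU_MMAP_SIZE: the value returned above is the size of the
 * per-vCPU shared region that userspace mmap()s on a vCPU fd (struct kvm_run,
 * plus the optional pio and coalesced-MMIO pages).  A rough, illustrative
 * userspace sketch:
 *
 *	size_t sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */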
5561 
5562 static struct file_operations kvm_chardev_ops = {
5563 	.unlocked_ioctl = kvm_dev_ioctl,
5564 	.llseek		= noop_llseek,
5565 	KVM_COMPAT(kvm_dev_ioctl),
5566 };
5567 
5568 static struct miscdevice kvm_dev = {
5569 	KVM_MINOR,
5570 	"kvm",
5571 	&kvm_chardev_ops,
5572 };
5573 
5574 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
5575 __visible bool kvm_rebooting;
5576 EXPORT_SYMBOL_GPL(kvm_rebooting);
5577 
5578 static DEFINE_PER_CPU(bool, hardware_enabled);
5579 static int kvm_usage_count;
5580 
5581 static int __hardware_enable_nolock(void)
5582 {
5583 	if (__this_cpu_read(hardware_enabled))
5584 		return 0;
5585 
5586 	if (kvm_arch_hardware_enable()) {
5587 		pr_info("kvm: enabling virtualization on CPU%d failed\n",
5588 			raw_smp_processor_id());
5589 		return -EIO;
5590 	}
5591 
5592 	__this_cpu_write(hardware_enabled, true);
5593 	return 0;
5594 }
5595 
5596 static void hardware_enable_nolock(void *failed)
5597 {
5598 	if (__hardware_enable_nolock())
5599 		atomic_inc(failed);
5600 }
5601 
5602 static int kvm_online_cpu(unsigned int cpu)
5603 {
5604 	int ret = 0;
5605 
5606 	/*
5607 	 * Abort the CPU online process if hardware virtualization cannot
5608 	 * be enabled. Otherwise running VMs would encounter unrecoverable
5609 	 * errors when scheduled to this CPU.
5610 	 */
5611 	mutex_lock(&kvm_lock);
5612 	if (kvm_usage_count)
5613 		ret = __hardware_enable_nolock();
5614 	mutex_unlock(&kvm_lock);
5615 	return ret;
5616 }
5617 
5618 static void hardware_disable_nolock(void *junk)
5619 {
5620 	/*
5621 	 * Note, hardware_disable_all_nolock() tells all online CPUs to disable
5622 	 * hardware, not just CPUs that successfully enabled hardware!
5623 	 */
5624 	if (!__this_cpu_read(hardware_enabled))
5625 		return;
5626 
5627 	kvm_arch_hardware_disable();
5628 
5629 	__this_cpu_write(hardware_enabled, false);
5630 }
5631 
5632 static int kvm_offline_cpu(unsigned int cpu)
5633 {
5634 	mutex_lock(&kvm_lock);
5635 	if (kvm_usage_count)
5636 		hardware_disable_nolock(NULL);
5637 	mutex_unlock(&kvm_lock);
5638 	return 0;
5639 }
5640 
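/*
 * Virtualization support is reference counted via kvm_usage_count: the first
 * hardware_enable_all() call enables it on all online CPUs, and the
 * hardware_disable_all() call from the last user disables it again.
 * kvm_online_cpu()/kvm_offline_cpu() keep hotplugged CPUs consistent with
 * that count.
 */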
5641 static void hardware_disable_all_nolock(void)
5642 {
5643 	BUG_ON(!kvm_usage_count);
5644 
5645 	kvm_usage_count--;
5646 	if (!kvm_usage_count)
5647 		on_each_cpu(hardware_disable_nolock, NULL, 1);
5648 }
5649 
5650 static void hardware_disable_all(void)
5651 {
5652 	cpus_read_lock();
5653 	mutex_lock(&kvm_lock);
5654 	hardware_disable_all_nolock();
5655 	mutex_unlock(&kvm_lock);
5656 	cpus_read_unlock();
5657 }
5658 
5659 static int hardware_enable_all(void)
5660 {
5661 	atomic_t failed = ATOMIC_INIT(0);
5662 	int r;
5663 
5664 	/*
5665 	 * Do not enable hardware virtualization if the system is going down.
5666 	 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
5667 	 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
5668 	 * after kvm_shutdown() is called.  Note, this relies on system_state
5669 	 * being set _before_ kvm_shutdown(), which is why KVM uses a syscore ops
5670 	 * hook instead of registering a dedicated reboot notifier (the latter
5671 	 * runs before system_state is updated).
5672 	 */
5673 	if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
5674 	    system_state == SYSTEM_RESTART)
5675 		return -EBUSY;
5676 
5677 	/*
5678 	 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
5679 	 * is called, and so on_each_cpu() between them includes the CPU that
5680 	 * is being onlined.  As a result, hardware_enable_nolock() may get
5681 	 * invoked before kvm_online_cpu(), which also enables hardware if the
5682 	 * usage count is non-zero.  Disable CPU hotplug to avoid attempting to
5683 	 * enable hardware multiple times.
5684 	 */
5685 	cpus_read_lock();
5686 	mutex_lock(&kvm_lock);
5687 
5688 	r = 0;
5689 
5690 	kvm_usage_count++;
5691 	if (kvm_usage_count == 1) {
5692 		on_each_cpu(hardware_enable_nolock, &failed, 1);
5693 
5694 		if (atomic_read(&failed)) {
5695 			hardware_disable_all_nolock();
5696 			r = -EBUSY;
5697 		}
5698 	}
5699 
5700 	mutex_unlock(&kvm_lock);
5701 	cpus_read_unlock();
5702 
5703 	return r;
5704 }
5705 
5706 static void kvm_shutdown(void)
5707 {
5708 	/*
5709 	 * Disable hardware virtualization and set kvm_rebooting to indicate
5710 	 * that KVM has asynchronously disabled hardware virtualization, i.e.
5711 	 * that relevant errors and exceptions aren't entirely unexpected.
5712 	 * Some flavors of hardware virtualization need to be disabled before
5713 	 * transferring control to firmware (to perform shutdown/reboot), e.g.
5714 	 * on x86, virtualization can block INIT interrupts, which are used by
5715 	 * firmware to pull APs back under firmware control.  Note, this path
5716 	 * is used for both shutdown and reboot scenarios, i.e. neither name is
5717 	 * 100% comprehensive.
5718 	 */
5719 	pr_info("kvm: exiting hardware virtualization\n");
5720 	kvm_rebooting = true;
5721 	on_each_cpu(hardware_disable_nolock, NULL, 1);
5722 }
5723 
5724 static int kvm_suspend(void)
5725 {
5726 	/*
5727 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
5728 	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
5729 	 * is stable.  Assert that kvm_lock is not held to ensure the system
5730 	 * isn't suspended while KVM is enabling hardware.  Hardware enabling
5731 	 * can be preempted, but the task cannot be frozen until it has dropped
5732 	 * all locks (userspace tasks are frozen via a fake signal).
5733 	 */
5734 	lockdep_assert_not_held(&kvm_lock);
5735 	lockdep_assert_irqs_disabled();
5736 
5737 	if (kvm_usage_count)
5738 		hardware_disable_nolock(NULL);
5739 	return 0;
5740 }
5741 
5742 static void kvm_resume(void)
5743 {
5744 	lockdep_assert_not_held(&kvm_lock);
5745 	lockdep_assert_irqs_disabled();
5746 
5747 	if (kvm_usage_count)
5748 		WARN_ON_ONCE(__hardware_enable_nolock());
5749 }
5750 
5751 static struct syscore_ops kvm_syscore_ops = {
5752 	.suspend = kvm_suspend,
5753 	.resume = kvm_resume,
5754 	.shutdown = kvm_shutdown,
5755 };
5756 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5757 static int hardware_enable_all(void)
5758 {
5759 	return 0;
5760 }
5761 
5762 static void hardware_disable_all(void)
5763 {
5765 }
5766 #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5767 
5768 static void kvm_iodevice_destructor(struct kvm_io_device *dev)
5769 {
5770 	if (dev->ops->destructor)
5771 		dev->ops->destructor(dev);
5772 }
5773 
5774 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5775 {
5776 	int i;
5777 
5778 	for (i = 0; i < bus->dev_count; i++) {
5779 		struct kvm_io_device *pos = bus->range[i].dev;
5780 
5781 		kvm_iodevice_destructor(pos);
5782 	}
5783 	kfree(bus);
5784 }
5785 
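/*
 * Comparator for I/O ranges.  Returns 0 when r1 lies entirely within r2 (or,
 * if r2->len is zero, when the start addresses match exactly).  For example,
 * a 4-byte access at GPA 0x1000 compares equal to a device registered for
 * [0x1000, 0x1010), so lookups match any device whose range fully covers the
 * access.
 */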
5786 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5787 				 const struct kvm_io_range *r2)
5788 {
5789 	gpa_t addr1 = r1->addr;
5790 	gpa_t addr2 = r2->addr;
5791 
5792 	if (addr1 < addr2)
5793 		return -1;
5794 
5795 	/*
5796 	 * If r2->len == 0, match the exact address.  If r2->len != 0, accept
5797 	 * any overlapping access; any order is acceptable for overlapping
5798 	 * ranges, because kvm_io_bus_get_first_dev ensures we process all of them.
5799 	 */
5800 	if (r2->len) {
5801 		addr1 += r1->len;
5802 		addr2 += r2->len;
5803 	}
5804 
5805 	if (addr1 > addr2)
5806 		return 1;
5807 
5808 	return 0;
5809 }
5810 
5811 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5812 {
5813 	return kvm_io_bus_cmp(p1, p2);
5814 }
5815 
5816 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5817 			     gpa_t addr, int len)
5818 {
5819 	struct kvm_io_range *range, key;
5820 	int off;
5821 
5822 	key = (struct kvm_io_range) {
5823 		.addr = addr,
5824 		.len = len,
5825 	};
5826 
5827 	range = bsearch(&key, bus->range, bus->dev_count,
5828 			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5829 	if (range == NULL)
5830 		return -ENOENT;
5831 
5832 	off = range - bus->range;
5833 
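	/*
	 * bsearch() may land on any entry that compares equal to the key;
	 * rewind to the first one so callers iterate over every match.
	 */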
5834 	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5835 		off--;
5836 
5837 	return off;
5838 }
5839 
5840 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5841 			      struct kvm_io_range *range, const void *val)
5842 {
5843 	int idx;
5844 
5845 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5846 	if (idx < 0)
5847 		return -EOPNOTSUPP;
5848 
5849 	while (idx < bus->dev_count &&
5850 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5851 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5852 					range->len, val))
5853 			return idx;
5854 		idx++;
5855 	}
5856 
5857 	return -EOPNOTSUPP;
5858 }
5859 
5860 /* kvm_io_bus_write - called under kvm->slots_lock */
5861 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5862 		     int len, const void *val)
5863 {
5864 	struct kvm_io_bus *bus;
5865 	struct kvm_io_range range;
5866 	int r;
5867 
5868 	range = (struct kvm_io_range) {
5869 		.addr = addr,
5870 		.len = len,
5871 	};
5872 
5873 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5874 	if (!bus)
5875 		return -ENOMEM;
5876 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
5877 	return r < 0 ? r : 0;
5878 }
5879 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5880 
5881 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5882 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5883 			    gpa_t addr, int len, const void *val, long cookie)
5884 {
5885 	struct kvm_io_bus *bus;
5886 	struct kvm_io_range range;
5887 
5888 	range = (struct kvm_io_range) {
5889 		.addr = addr,
5890 		.len = len,
5891 	};
5892 
5893 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5894 	if (!bus)
5895 		return -ENOMEM;
5896 
5897 	/* First try the device referenced by cookie. */
5898 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
5899 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5900 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5901 					val))
5902 			return cookie;
5903 
5904 	/*
5905 	 * cookie contained garbage; fall back to search and return the
5906 	 * correct cookie value.
5907 	 */
5908 	return __kvm_io_bus_write(vcpu, bus, &range, val);
5909 }
5910 
5911 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5912 			     struct kvm_io_range *range, void *val)
5913 {
5914 	int idx;
5915 
5916 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5917 	if (idx < 0)
5918 		return -EOPNOTSUPP;
5919 
5920 	while (idx < bus->dev_count &&
5921 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5922 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5923 				       range->len, val))
5924 			return idx;
5925 		idx++;
5926 	}
5927 
5928 	return -EOPNOTSUPP;
5929 }
5930 
5931 /* kvm_io_bus_read - called under kvm->slots_lock */
5932 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5933 		    int len, void *val)
5934 {
5935 	struct kvm_io_bus *bus;
5936 	struct kvm_io_range range;
5937 	int r;
5938 
5939 	range = (struct kvm_io_range) {
5940 		.addr = addr,
5941 		.len = len,
5942 	};
5943 
5944 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5945 	if (!bus)
5946 		return -ENOMEM;
5947 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
5948 	return r < 0 ? r : 0;
5949 }
5950 
5951 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5952 			    int len, struct kvm_io_device *dev)
5953 {
5954 	int i;
5955 	struct kvm_io_bus *new_bus, *bus;
5956 	struct kvm_io_range range;
5957 
5958 	lockdep_assert_held(&kvm->slots_lock);
5959 
5960 	bus = kvm_get_bus(kvm, bus_idx);
5961 	if (!bus)
5962 		return -ENOMEM;
5963 
5964 	/* exclude ioeventfd which is limited by maximum fd */
5965 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5966 		return -ENOSPC;
5967 
5968 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5969 			  GFP_KERNEL_ACCOUNT);
5970 	if (!new_bus)
5971 		return -ENOMEM;
5972 
5973 	range = (struct kvm_io_range) {
5974 		.addr = addr,
5975 		.len = len,
5976 		.dev = dev,
5977 	};
5978 
5979 	for (i = 0; i < bus->dev_count; i++)
5980 		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5981 			break;
5982 
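	/*
	 * Keep the array sorted: copy the ranges that precede the insertion
	 * point, place the new range in slot i, then copy the remaining
	 * ranges one slot further up.
	 */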
5983 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5984 	new_bus->dev_count++;
5985 	new_bus->range[i] = range;
5986 	memcpy(new_bus->range + i + 1, bus->range + i,
5987 		(bus->dev_count - i) * sizeof(struct kvm_io_range));
5988 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5989 	synchronize_srcu_expedited(&kvm->srcu);
5990 	kfree(bus);
5991 
5992 	return 0;
5993 }
5994 
5995 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5996 			      struct kvm_io_device *dev)
5997 {
5998 	int i;
5999 	struct kvm_io_bus *new_bus, *bus;
6000 
6001 	lockdep_assert_held(&kvm->slots_lock);
6002 
6003 	bus = kvm_get_bus(kvm, bus_idx);
6004 	if (!bus)
6005 		return 0;
6006 
6007 	for (i = 0; i < bus->dev_count; i++) {
6008 		if (bus->range[i].dev == dev)
6009 			break;
6010 	}
6012 
6013 	if (i == bus->dev_count)
6014 		return 0;
6015 
6016 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
6017 			  GFP_KERNEL_ACCOUNT);
6018 	if (new_bus) {
6019 		memcpy(new_bus, bus, struct_size(bus, range, i));
6020 		new_bus->dev_count--;
6021 		memcpy(new_bus->range + i, bus->range + i + 1,
6022 				flex_array_size(new_bus, range, new_bus->dev_count - i));
6023 	}
6024 
6025 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
6026 	synchronize_srcu_expedited(&kvm->srcu);
6027 
6028 	/*
6029 	 * If a NULL bus is installed, destroy the old bus, including all the
6030 	 * attached devices. Otherwise, destroy the caller's device only.
6031 	 */
6032 	if (!new_bus) {
6033 		pr_err("kvm: failed to shrink bus, removing it completely\n");
6034 		kvm_io_bus_destroy(bus);
6035 		return -ENOMEM;
6036 	}
6037 
6038 	kvm_iodevice_destructor(dev);
6039 	kfree(bus);
6040 	return 0;
6041 }
6042 
6043 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
6044 					 gpa_t addr)
6045 {
6046 	struct kvm_io_bus *bus;
6047 	int dev_idx, srcu_idx;
6048 	struct kvm_io_device *iodev = NULL;
6049 
6050 	srcu_idx = srcu_read_lock(&kvm->srcu);
6051 
6052 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
6053 	if (!bus)
6054 		goto out_unlock;
6055 
6056 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
6057 	if (dev_idx < 0)
6058 		goto out_unlock;
6059 
6060 	iodev = bus->range[dev_idx].dev;
6061 
6062 out_unlock:
6063 	srcu_read_unlock(&kvm->srcu, srcu_idx);
6064 
6065 	return iodev;
6066 }
6067 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
6068 
6069 static int kvm_debugfs_open(struct inode *inode, struct file *file,
6070 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
6071 			   const char *fmt)
6072 {
6073 	int ret;
6074 	struct kvm_stat_data *stat_data = inode->i_private;
6075 
6076 	/*
6077 	 * The debugfs files are a reference to the kvm struct which
6078 	 * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
6079 	 * avoids the race between open and the removal of the debugfs directory.
6080 	 */
6081 	if (!kvm_get_kvm_safe(stat_data->kvm))
6082 		return -ENOENT;
6083 
6084 	ret = simple_attr_open(inode, file, get,
6085 			       kvm_stats_debugfs_mode(stat_data->desc) & 0222
6086 			       ? set : NULL, fmt);
6087 	if (ret)
6088 		kvm_put_kvm(stat_data->kvm);
6089 
6090 	return ret;
6091 }
6092 
6093 static int kvm_debugfs_release(struct inode *inode, struct file *file)
6094 {
6095 	struct kvm_stat_data *stat_data = inode->i_private;
6096 
6097 	simple_attr_release(inode, file);
6098 	kvm_put_kvm(stat_data->kvm);
6099 
6100 	return 0;
6101 }
6102 
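/*
 * Stats are addressed by their byte offset into struct kvm_vm_stat or
 * struct kvm_vcpu_stat (taken from the stat descriptor); the helpers below
 * read or zero the u64 counter at that offset.
 */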
6103 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
6104 {
6105 	*val = *(u64 *)((void *)(&kvm->stat) + offset);
6106 
6107 	return 0;
6108 }
6109 
6110 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
6111 {
6112 	*(u64 *)((void *)(&kvm->stat) + offset) = 0;
6113 
6114 	return 0;
6115 }
6116 
6117 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
6118 {
6119 	unsigned long i;
6120 	struct kvm_vcpu *vcpu;
6121 
6122 	*val = 0;
6123 
6124 	kvm_for_each_vcpu(i, vcpu, kvm)
6125 		*val += *(u64 *)((void *)(&vcpu->stat) + offset);
6126 
6127 	return 0;
6128 }
6129 
6130 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
6131 {
6132 	unsigned long i;
6133 	struct kvm_vcpu *vcpu;
6134 
6135 	kvm_for_each_vcpu(i, vcpu, kvm)
6136 		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
6137 
6138 	return 0;
6139 }
6140 
6141 static int kvm_stat_data_get(void *data, u64 *val)
6142 {
6143 	int r = -EFAULT;
6144 	struct kvm_stat_data *stat_data = data;
6145 
6146 	switch (stat_data->kind) {
6147 	case KVM_STAT_VM:
6148 		r = kvm_get_stat_per_vm(stat_data->kvm,
6149 					stat_data->desc->desc.offset, val);
6150 		break;
6151 	case KVM_STAT_VCPU:
6152 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
6153 					  stat_data->desc->desc.offset, val);
6154 		break;
6155 	}
6156 
6157 	return r;
6158 }
6159 
6160 static int kvm_stat_data_clear(void *data, u64 val)
6161 {
6162 	int r = -EFAULT;
6163 	struct kvm_stat_data *stat_data = data;
6164 
6165 	if (val)
6166 		return -EINVAL;
6167 
6168 	switch (stat_data->kind) {
6169 	case KVM_STAT_VM:
6170 		r = kvm_clear_stat_per_vm(stat_data->kvm,
6171 					  stat_data->desc->desc.offset);
6172 		break;
6173 	case KVM_STAT_VCPU:
6174 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
6175 					    stat_data->desc->desc.offset);
6176 		break;
6177 	}
6178 
6179 	return r;
6180 }
6181 
6182 static int kvm_stat_data_open(struct inode *inode, struct file *file)
6183 {
6184 	__simple_attr_check_format("%llu\n", 0ull);
6185 	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
6186 				kvm_stat_data_clear, "%llu\n");
6187 }
6188 
6189 static const struct file_operations stat_fops_per_vm = {
6190 	.owner = THIS_MODULE,
6191 	.open = kvm_stat_data_open,
6192 	.release = kvm_debugfs_release,
6193 	.read = simple_attr_read,
6194 	.write = simple_attr_write,
6195 	.llseek = no_llseek,
6196 };
6197 
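/*
 * The vm_stat_* and vcpu_stat_* helpers back the global debugfs files in the
 * "kvm" directory: they walk vm_list under kvm_lock and sum (or clear) the
 * counter at the given offset across every VM, or every vCPU of every VM,
 * respectively.
 */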
6198 static int vm_stat_get(void *_offset, u64 *val)
6199 {
6200 	unsigned offset = (long)_offset;
6201 	struct kvm *kvm;
6202 	u64 tmp_val;
6203 
6204 	*val = 0;
6205 	mutex_lock(&kvm_lock);
6206 	list_for_each_entry(kvm, &vm_list, vm_list) {
6207 		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
6208 		*val += tmp_val;
6209 	}
6210 	mutex_unlock(&kvm_lock);
6211 	return 0;
6212 }
6213 
6214 static int vm_stat_clear(void *_offset, u64 val)
6215 {
6216 	unsigned offset = (long)_offset;
6217 	struct kvm *kvm;
6218 
6219 	if (val)
6220 		return -EINVAL;
6221 
6222 	mutex_lock(&kvm_lock);
6223 	list_for_each_entry(kvm, &vm_list, vm_list) {
6224 		kvm_clear_stat_per_vm(kvm, offset);
6225 	}
6226 	mutex_unlock(&kvm_lock);
6227 
6228 	return 0;
6229 }
6230 
6231 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
6232 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
6233 
6234 static int vcpu_stat_get(void *_offset, u64 *val)
6235 {
6236 	unsigned offset = (long)_offset;
6237 	struct kvm *kvm;
6238 	u64 tmp_val;
6239 
6240 	*val = 0;
6241 	mutex_lock(&kvm_lock);
6242 	list_for_each_entry(kvm, &vm_list, vm_list) {
6243 		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
6244 		*val += tmp_val;
6245 	}
6246 	mutex_unlock(&kvm_lock);
6247 	return 0;
6248 }
6249 
6250 static int vcpu_stat_clear(void *_offset, u64 val)
6251 {
6252 	unsigned offset = (long)_offset;
6253 	struct kvm *kvm;
6254 
6255 	if (val)
6256 		return -EINVAL;
6257 
6258 	mutex_lock(&kvm_lock);
6259 	list_for_each_entry(kvm, &vm_list, vm_list) {
6260 		kvm_clear_stat_per_vcpu(kvm, offset);
6261 	}
6262 	mutex_unlock(&kvm_lock);
6263 
6264 	return 0;
6265 }
6266 
6267 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
6268 			"%llu\n");
6269 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
6270 
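/*
 * Emit a KOBJ_CHANGE uevent on the /dev/kvm device when a VM is created or
 * destroyed.  The event carries CREATED (VMs created since module load),
 * COUNT (currently active VMs), EVENT ("create" or "destroy"), the PID of the
 * userspace owner, and, when available, STATS_PATH pointing at the VM's
 * debugfs directory.
 */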
6271 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
6272 {
6273 	struct kobj_uevent_env *env;
6274 	unsigned long long created, active;
6275 
6276 	if (!kvm_dev.this_device || !kvm)
6277 		return;
6278 
6279 	mutex_lock(&kvm_lock);
6280 	if (type == KVM_EVENT_CREATE_VM) {
6281 		kvm_createvm_count++;
6282 		kvm_active_vms++;
6283 	} else if (type == KVM_EVENT_DESTROY_VM) {
6284 		kvm_active_vms--;
6285 	}
6286 	created = kvm_createvm_count;
6287 	active = kvm_active_vms;
6288 	mutex_unlock(&kvm_lock);
6289 
6290 	env = kzalloc(sizeof(*env), GFP_KERNEL);
6291 	if (!env)
6292 		return;
6293 
6294 	add_uevent_var(env, "CREATED=%llu", created);
6295 	add_uevent_var(env, "COUNT=%llu", active);
6296 
6297 	if (type == KVM_EVENT_CREATE_VM) {
6298 		add_uevent_var(env, "EVENT=create");
6299 		kvm->userspace_pid = task_pid_nr(current);
6300 	} else if (type == KVM_EVENT_DESTROY_VM) {
6301 		add_uevent_var(env, "EVENT=destroy");
6302 	}
6303 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
6304 
6305 	if (!IS_ERR(kvm->debugfs_dentry)) {
6306 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
6307 
6308 		if (p) {
6309 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
6310 			if (!IS_ERR(tmp))
6311 				add_uevent_var(env, "STATS_PATH=%s", tmp);
6312 			kfree(p);
6313 		}
6314 	}
6315 	/* no need for checks, since we are adding at most 5 keys */
6316 	env->envp[env->envp_idx++] = NULL;
6317 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
6318 	kfree(env);
6319 }
6320 
6321 static void kvm_init_debug(void)
6322 {
6323 	const struct file_operations *fops;
6324 	const struct _kvm_stats_desc *pdesc;
6325 	int i;
6326 
6327 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6328 
6329 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
6330 		pdesc = &kvm_vm_stats_desc[i];
6331 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6332 			fops = &vm_stat_fops;
6333 		else
6334 			fops = &vm_stat_readonly_fops;
6335 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6336 				kvm_debugfs_dir,
6337 				(void *)(long)pdesc->desc.offset, fops);
6338 	}
6339 
6340 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
6341 		pdesc = &kvm_vcpu_stats_desc[i];
6342 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
6343 			fops = &vcpu_stat_fops;
6344 		else
6345 			fops = &vcpu_stat_readonly_fops;
6346 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
6347 				kvm_debugfs_dir,
6348 				(void *)(long)pdesc->desc.offset, fops);
6349 	}
6350 }
6351 
6352 static inline
6353 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
6354 {
6355 	return container_of(pn, struct kvm_vcpu, preempt_notifier);
6356 }
6357 
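/*
 * Preempt notifier hooks, wired up via kvm_preempt_ops in kvm_init(): they
 * run whenever the task that has loaded a vCPU is scheduled in or out, and
 * keep the per-CPU kvm_running_vcpu pointer and the vcpu->preempted, ready
 * and scheduled_out flags in sync with the scheduler.
 */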
6358 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
6359 {
6360 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6361 
6362 	WRITE_ONCE(vcpu->preempted, false);
6363 	WRITE_ONCE(vcpu->ready, false);
6364 
6365 	__this_cpu_write(kvm_running_vcpu, vcpu);
6366 	kvm_arch_vcpu_load(vcpu, cpu);
6367 
6368 	WRITE_ONCE(vcpu->scheduled_out, false);
6369 }
6370 
6371 static void kvm_sched_out(struct preempt_notifier *pn,
6372 			  struct task_struct *next)
6373 {
6374 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
6375 
6376 	WRITE_ONCE(vcpu->scheduled_out, true);
6377 
6378 	if (current->on_rq && vcpu->wants_to_run) {
6379 		WRITE_ONCE(vcpu->preempted, true);
6380 		WRITE_ONCE(vcpu->ready, true);
6381 	}
6382 	kvm_arch_vcpu_put(vcpu);
6383 	__this_cpu_write(kvm_running_vcpu, NULL);
6384 }
6385 
6386 /**
6387  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
6388  *
6389  * We can disable preemption locally around accessing the per-CPU variable,
6390  * and use the resolved vcpu pointer after enabling preemption again,
6391  * because even if the current thread is migrated to another CPU, reading
6392  * the per-CPU value later will give us the same value as we update the
6393  * per-CPU variable in the preempt notifier handlers.
6394  */
6395 struct kvm_vcpu *kvm_get_running_vcpu(void)
6396 {
6397 	struct kvm_vcpu *vcpu;
6398 
6399 	preempt_disable();
6400 	vcpu = __this_cpu_read(kvm_running_vcpu);
6401 	preempt_enable();
6402 
6403 	return vcpu;
6404 }
6405 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
6406 
6407 /**
6408  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6409  */
6410 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6411 {
6412 	return &kvm_running_vcpu;
6413 }
6414 
6415 #ifdef CONFIG_GUEST_PERF_EVENTS
6416 static unsigned int kvm_guest_state(void)
6417 {
6418 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6419 	unsigned int state;
6420 
6421 	if (!kvm_arch_pmi_in_guest(vcpu))
6422 		return 0;
6423 
6424 	state = PERF_GUEST_ACTIVE;
6425 	if (!kvm_arch_vcpu_in_kernel(vcpu))
6426 		state |= PERF_GUEST_USER;
6427 
6428 	return state;
6429 }
6430 
6431 static unsigned long kvm_guest_get_ip(void)
6432 {
6433 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6434 
6435 	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
6436 	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
6437 		return 0;
6438 
6439 	return kvm_arch_vcpu_get_ip(vcpu);
6440 }
6441 
6442 static struct perf_guest_info_callbacks kvm_guest_cbs = {
6443 	.state			= kvm_guest_state,
6444 	.get_ip			= kvm_guest_get_ip,
6445 	.handle_intel_pt_intr	= NULL,
6446 };
6447 
6448 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
6449 {
6450 	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
6451 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
6452 }
6453 void kvm_unregister_perf_callbacks(void)
6454 {
6455 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6456 }
6457 #endif
6458 
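/*
 * Module-wide initialization, called from architecture code.  As a rough,
 * illustrative sketch (struct name is a placeholder), a vendor module's init
 * path ends up doing something like:
 *
 *	return kvm_init(sizeof(struct my_vcpu), __alignof__(struct my_vcpu),
 *			THIS_MODULE);
 *
 * so that kvm_vcpu_cache is sized for the architecture's containing vCPU
 * structure.
 */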
6459 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
6460 {
6461 	int r;
6462 	int cpu;
6463 
6464 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6465 	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
6466 				      kvm_online_cpu, kvm_offline_cpu);
6467 	if (r)
6468 		return r;
6469 
6470 	register_syscore_ops(&kvm_syscore_ops);
6471 #endif
6472 
6473 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
6474 	if (!vcpu_align)
6475 		vcpu_align = __alignof__(struct kvm_vcpu);
6476 	kvm_vcpu_cache =
6477 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
6478 					   SLAB_ACCOUNT,
6479 					   offsetof(struct kvm_vcpu, arch),
6480 					   offsetofend(struct kvm_vcpu, stats_id)
6481 					   - offsetof(struct kvm_vcpu, arch),
6482 					   NULL);
6483 	if (!kvm_vcpu_cache) {
6484 		r = -ENOMEM;
6485 		goto err_vcpu_cache;
6486 	}
6487 
6488 	for_each_possible_cpu(cpu) {
6489 		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
6490 					    GFP_KERNEL, cpu_to_node(cpu))) {
6491 			r = -ENOMEM;
6492 			goto err_cpu_kick_mask;
6493 		}
6494 	}
6495 
6496 	r = kvm_irqfd_init();
6497 	if (r)
6498 		goto err_irqfd;
6499 
6500 	r = kvm_async_pf_init();
6501 	if (r)
6502 		goto err_async_pf;
6503 
6504 	kvm_chardev_ops.owner = module;
6505 	kvm_vm_fops.owner = module;
6506 	kvm_vcpu_fops.owner = module;
6507 	kvm_device_fops.owner = module;
6508 
6509 	kvm_preempt_ops.sched_in = kvm_sched_in;
6510 	kvm_preempt_ops.sched_out = kvm_sched_out;
6511 
6512 	kvm_init_debug();
6513 
6514 	r = kvm_vfio_ops_init();
6515 	if (WARN_ON_ONCE(r))
6516 		goto err_vfio;
6517 
6518 	kvm_gmem_init(module);
6519 
6520 	/*
6521 	 * Registration _must_ be the very last thing done, as this exposes
6522 	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
6523 	 */
6524 	r = misc_register(&kvm_dev);
6525 	if (r) {
6526 		pr_err("kvm: misc device register failed\n");
6527 		goto err_register;
6528 	}
6529 
6530 	return 0;
6531 
6532 err_register:
6533 	kvm_vfio_ops_exit();
6534 err_vfio:
6535 	kvm_async_pf_deinit();
6536 err_async_pf:
6537 	kvm_irqfd_exit();
6538 err_irqfd:
6539 err_cpu_kick_mask:
6540 	for_each_possible_cpu(cpu)
6541 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6542 	kmem_cache_destroy(kvm_vcpu_cache);
6543 err_vcpu_cache:
6544 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6545 	unregister_syscore_ops(&kvm_syscore_ops);
6546 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6547 #endif
6548 	return r;
6549 }
6550 EXPORT_SYMBOL_GPL(kvm_init);
6551 
6552 void kvm_exit(void)
6553 {
6554 	int cpu;
6555 
6556 	/*
6557 	 * Note, unregistering /dev/kvm doesn't strictly need to come first, as
6558 	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
6559 	 * to KVM while the module is being stopped.
6560 	 */
6561 	misc_deregister(&kvm_dev);
6562 
6563 	debugfs_remove_recursive(kvm_debugfs_dir);
6564 	for_each_possible_cpu(cpu)
6565 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6566 	kmem_cache_destroy(kvm_vcpu_cache);
6567 	kvm_vfio_ops_exit();
6568 	kvm_async_pf_deinit();
6569 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6570 	unregister_syscore_ops(&kvm_syscore_ops);
6571 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6572 #endif
6573 	kvm_irqfd_exit();
6574 }
6575 EXPORT_SYMBOL_GPL(kvm_exit);
6576 
6577 struct kvm_vm_worker_thread_context {
6578 	struct kvm *kvm;
6579 	struct task_struct *parent;
6580 	struct completion init_done;
6581 	kvm_vm_thread_fn_t thread_fn;
6582 	uintptr_t data;
6583 	int err;
6584 };
6585 
6586 static int kvm_vm_worker_thread(void *context)
6587 {
6588 	/*
6589 	 * The init_context is allocated on the stack of the parent thread, so
6590 	 * we have to locally copy anything that is needed beyond initialization
6591 	 * we have to locally copy anything that is needed beyond initialization.
6592 	struct kvm_vm_worker_thread_context *init_context = context;
6593 	struct task_struct *parent;
6594 	struct kvm *kvm = init_context->kvm;
6595 	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
6596 	uintptr_t data = init_context->data;
6597 	int err;
6598 
6599 	err = kthread_park(current);
6600 	/* kthread_park(current) is never supposed to return an error */
6601 	WARN_ON(err != 0);
6602 	if (err)
6603 		goto init_complete;
6604 
6605 	err = cgroup_attach_task_all(init_context->parent, current);
6606 	if (err) {
6607 		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
6608 			__func__, err);
6609 		goto init_complete;
6610 	}
6611 
6612 	set_user_nice(current, task_nice(init_context->parent));
6613 
6614 init_complete:
6615 	init_context->err = err;
6616 	complete(&init_context->init_done);
6617 	init_context = NULL;
6618 
6619 	if (err)
6620 		goto out;
6621 
6622 	/* Wait to be woken up by the spawner before proceeding. */
6623 	kthread_parkme();
6624 
6625 	if (!kthread_should_stop())
6626 		err = thread_fn(kvm, data);
6627 
6628 out:
6629 	/*
6630 	 * Move the kthread back to its original cgroup to prevent it from
6631 	 * lingering in the cgroup of the VM process after the latter finishes
6632 	 * its execution.
6633 	 *
6634 	 * kthread_stop() waits on the 'exited' completion condition which is
6635 	 * set in exit_mm(), via mm_release(), in do_exit(). However, the
6636 	 * kthread is removed from the cgroup in the cgroup_exit() which is
6637 	 * called after the exit_mm(). This causes the kthread_stop() to return
6638 	 * before the kthread actually quits the cgroup.
6639 	 */
6640 	rcu_read_lock();
6641 	parent = rcu_dereference(current->real_parent);
6642 	get_task_struct(parent);
6643 	rcu_read_unlock();
6644 	cgroup_attach_task_all(parent, current);
6645 	put_task_struct(parent);
6646 
6647 	return err;
6648 }
6649 
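/*
 * Spawn a kthread tied to this VM.  The thread attaches itself to the
 * caller's cgroups and inherits its nice value, then parks; once the caller
 * unparks it, thread_fn(kvm, data) runs until the thread is stopped.  On
 * success the task is returned through @thread_ptr.
 */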
6650 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6651 				uintptr_t data, const char *name,
6652 				struct task_struct **thread_ptr)
6653 {
6654 	struct kvm_vm_worker_thread_context init_context = {};
6655 	struct task_struct *thread;
6656 
6657 	*thread_ptr = NULL;
6658 	init_context.kvm = kvm;
6659 	init_context.parent = current;
6660 	init_context.thread_fn = thread_fn;
6661 	init_context.data = data;
6662 	init_completion(&init_context.init_done);
6663 
6664 	thread = kthread_run(kvm_vm_worker_thread, &init_context,
6665 			     "%s-%d", name, task_pid_nr(current));
6666 	if (IS_ERR(thread))
6667 		return PTR_ERR(thread);
6668 
6669 	/* kthread_run is never supposed to return NULL */
6670 	WARN_ON(thread == NULL);
6671 
6672 	wait_for_completion(&init_context.init_done);
6673 
6674 	if (!init_context.err)
6675 		*thread_ptr = thread;
6676 
6677 	return init_context.err;
6678 }
6679