1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef __KVM_HOST_H
3 #define __KVM_HOST_H
4
5 #include <linux/entry-virt.h>
6 #include <linux/types.h>
7 #include <linux/hardirq.h>
8 #include <linux/list.h>
9 #include <linux/mutex.h>
10 #include <linux/spinlock.h>
11 #include <linux/signal.h>
12 #include <linux/sched.h>
13 #include <linux/sched/stat.h>
14 #include <linux/bug.h>
15 #include <linux/minmax.h>
16 #include <linux/mm.h>
17 #include <linux/mmu_notifier.h>
18 #include <linux/preempt.h>
19 #include <linux/msi.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/rcupdate.h>
23 #include <linux/ratelimit.h>
24 #include <linux/err.h>
25 #include <linux/irqflags.h>
26 #include <linux/context_tracking.h>
27 #include <linux/irqbypass.h>
28 #include <linux/rcuwait.h>
29 #include <linux/refcount.h>
30 #include <linux/nospec.h>
31 #include <linux/notifier.h>
32 #include <linux/ftrace.h>
33 #include <linux/hashtable.h>
34 #include <linux/instrumentation.h>
35 #include <linux/interval_tree.h>
36 #include <linux/rbtree.h>
37 #include <linux/xarray.h>
38 #include <asm/signal.h>
39
40 #include <linux/kvm.h>
41 #include <linux/kvm_para.h>
42
43 #include <linux/kvm_types.h>
44
45 #include <asm/kvm_host.h>
46 #include <linux/kvm_dirty_ring.h>
47
48 #ifndef KVM_MAX_VCPU_IDS
49 #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
50 #endif
51
52 /*
53  * Bits 16-31 of kvm_userspace_memory_region::flags are used internally by
54  * KVM; the other bits are visible to userspace and are defined in
55  * include/uapi/linux/kvm.h.
56 */
57 #define KVM_MEMSLOT_INVALID (1UL << 16)
58 #define KVM_MEMSLOT_GMEM_ONLY (1UL << 17)
59
60 /*
61 * Bit 63 of the memslot generation number is an "update in-progress flag",
62 * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
63 * This flag effectively creates a unique generation number that is used to
64 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
65 * i.e. may (or may not) have come from the previous memslots generation.
66 *
67 * This is necessary because the actual memslots update is not atomic with
68 * respect to the generation number update. Updating the generation number
69 * first would allow a vCPU to cache a spte from the old memslots using the
70 * new generation number, and updating the generation number after switching
71 * to the new memslots would allow cache hits using the old generation number
72 * to reference the defunct memslots.
73 *
74 * This mechanism is used to prevent getting hits in KVM's caches while a
75 * memslot update is in-progress, and to prevent cache hits *after* updating
76 * the actual generation number against accesses that were inserted into the
77 * cache *before* the memslots were updated.
78 */
79 #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
80
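/*
 * Illustrative sketch (assumption, not part of the original header): how a
 * consumer of cached memslot data typically uses the generation number,
 * refusing to cache anything while the update-in-progress flag is set.  The
 * 'cache' structure here is hypothetical.
 *
 *   u64 gen = kvm_memslots(kvm)->generation;
 *
 *   if (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS)
 *           return;                  // never cache during a memslot update
 *   cache->generation = gen;
 *   ...
 *   valid = (cache->generation == kvm_memslots(kvm)->generation);
 */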
81 /* Two fragments for cross MMIO pages. */
82 #define KVM_MAX_MMIO_FRAGMENTS 2
83
84 #ifndef KVM_MAX_NR_ADDRESS_SPACES
85 #define KVM_MAX_NR_ADDRESS_SPACES 1
86 #endif
87
88 /*
89  * For a normal pfn, the highest 12 bits must be zero, so we can use
90  * bits 52-62 to indicate an error pfn and bit 63 to indicate a
91  * no-slot pfn.
92 */
93 #define KVM_PFN_ERR_MASK (0x7ffULL << 52)
94 #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
95 #define KVM_PFN_NOSLOT (0x1ULL << 63)
96
97 #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
98 #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
99 #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
100 #define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
101 #define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4)
102
103 /*
104  * Error pfns indicate that the gfn is in a memslot but it failed to
105  * be translated to a pfn on the host.
106 */
107 static inline bool is_error_pfn(kvm_pfn_t pfn)
108 {
109 return !!(pfn & KVM_PFN_ERR_MASK);
110 }
111
112 /*
113 * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
114 * by a pending signal. Note, the signal may or may not be fatal.
115 */
116 static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
117 {
118 return pfn == KVM_PFN_ERR_SIGPENDING;
119 }
120
121 /*
122  * Error_noslot pfns indicate that the gfn cannot be translated to a
123  * pfn - either the gfn is not in any memslot, or translating it to a
124  * host pfn failed.
125 */
126 static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
127 {
128 return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
129 }
130
131 /* noslot pfn indicates that the gfn is not in slot. */
132 static inline bool is_noslot_pfn(kvm_pfn_t pfn)
133 {
134 return pfn == KVM_PFN_NOSLOT;
135 }
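/*
 * Illustrative sketch (not from the original header): how callers typically
 * triage a pfn returned by the gfn->pfn paths using the predicates above.
 * The MMIO handler named below is hypothetical.
 *
 *   if (is_sigpending_pfn(pfn))
 *           return -EINTR;
 *   if (is_noslot_pfn(pfn))
 *           return handle_mmio(vcpu, gfn);   // hypothetical MMIO path
 *   if (is_error_pfn(pfn))
 *           return -EFAULT;
 *   // otherwise 'pfn' is a valid host translation
 */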
136
137 /*
138  * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
139  * (e.g. s390) provide their own defines and their own kvm_is_error_hva().
140 */
141 #ifndef KVM_HVA_ERR_BAD
142
143 #define KVM_HVA_ERR_BAD (PAGE_OFFSET)
144 #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
145
146 static inline bool kvm_is_error_hva(unsigned long addr)
147 {
148 return addr >= PAGE_OFFSET;
149 }
150
151 #endif
152
153 static inline bool kvm_is_error_gpa(gpa_t gpa)
154 {
155 return gpa == INVALID_GPA;
156 }
157
158 #define KVM_REQUEST_MASK GENMASK(7,0)
159 #define KVM_REQUEST_NO_WAKEUP BIT(8)
160 #define KVM_REQUEST_WAIT BIT(9)
161 #define KVM_REQUEST_NO_ACTION BIT(10)
162 /*
163 * Architecture-independent vcpu->requests bit members
164  * Bits 4-7 are reserved for more arch-independent bits.
165 */
166 #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
167 #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
168 #define KVM_REQ_UNBLOCK 2
169 #define KVM_REQ_DIRTY_RING_SOFT_FULL 3
170 #define KVM_REQUEST_ARCH_BASE 8
171
172 /*
173  * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
174 * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
175 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
176 * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
177 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
178 * guarantee the vCPU received an IPI and has actually exited guest mode.
179 */
180 #define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
181
182 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
183 BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
184 (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
185 })
186 #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
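/*
 * Illustrative sketch (not part of the original header): how an architecture
 * typically defines a private request with KVM_ARCH_REQ_FLAGS() and raises it
 * on all vCPUs.  The request name below is hypothetical; the helper
 * kvm_make_all_cpus_request() is declared just after this block.
 *
 *   #define KVM_REQ_EXAMPLE_SYNC   KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT)
 *
 *   // Force every vCPU to service the request before re-entering the guest.
 *   kvm_make_all_cpus_request(kvm, KVM_REQ_EXAMPLE_SYNC);
 */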
187
188 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
189 unsigned long *vcpu_bitmap);
190 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
191
192 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
193 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
194 #define KVM_PIT_IRQ_SOURCE_ID 2
195
196 extern struct mutex kvm_lock;
197 extern struct list_head vm_list;
198
199 struct kvm_io_range {
200 gpa_t addr;
201 int len;
202 struct kvm_io_device *dev;
203 };
204
205 #define NR_IOBUS_DEVS 1000
206
207 struct kvm_io_bus {
208 int dev_count;
209 int ioeventfd_count;
210 struct rcu_head rcu;
211 struct kvm_io_range range[];
212 };
213
214 enum kvm_bus {
215 KVM_MMIO_BUS,
216 KVM_PIO_BUS,
217 KVM_VIRTIO_CCW_NOTIFY_BUS,
218 KVM_FAST_MMIO_BUS,
219 KVM_IOCSR_BUS,
220 KVM_NR_BUSES
221 };
222
223 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
224 int len, const void *val);
225 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
226 gpa_t addr, int len, const void *val, long cookie);
227 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
228 int len, void *val);
229 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
230 int len, struct kvm_io_device *dev);
231 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
232 struct kvm_io_device *dev);
233 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
234 gpa_t addr);
235
236 #ifdef CONFIG_KVM_ASYNC_PF
237 struct kvm_async_pf {
238 struct work_struct work;
239 struct list_head link;
240 struct list_head queue;
241 struct kvm_vcpu *vcpu;
242 gpa_t cr2_or_gpa;
243 unsigned long addr;
244 struct kvm_arch_async_pf arch;
245 bool wakeup_all;
246 bool notpresent_injected;
247 };
248
249 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
250 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
251 bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
252 unsigned long hva, struct kvm_arch_async_pf *arch);
253 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
254 #endif
255
256 union kvm_mmu_notifier_arg {
257 unsigned long attributes;
258 };
259
260 enum kvm_gfn_range_filter {
261 KVM_FILTER_SHARED = BIT(0),
262 KVM_FILTER_PRIVATE = BIT(1),
263 };
264
265 struct kvm_gfn_range {
266 struct kvm_memory_slot *slot;
267 gfn_t start;
268 gfn_t end;
269 union kvm_mmu_notifier_arg arg;
270 enum kvm_gfn_range_filter attr_filter;
271 bool may_block;
272 bool lockless;
273 };
274 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
275 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
276 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
277
278 enum {
279 OUTSIDE_GUEST_MODE,
280 IN_GUEST_MODE,
281 EXITING_GUEST_MODE,
282 READING_SHADOW_PAGE_TABLES,
283 };
284
285 struct kvm_host_map {
286 /*
287 	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
288 	 * a 'struct page' for it). When the mem= kernel parameter is used, some
289 	 * memory can be used as guest memory even though it is not managed by
290 	 * the host kernel.
291 */
292 struct page *pinned_page;
293 struct page *page;
294 void *hva;
295 kvm_pfn_t pfn;
296 kvm_pfn_t gfn;
297 bool writable;
298 };
299
300 /*
301 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
302 * directly to check for that.
303 */
304 static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
305 {
306 return !!map->hva;
307 }
308
309 static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
310 {
311 return single_task_running() && !need_resched() && ktime_before(cur, stop);
312 }
313
314 /*
315 * Sometimes a large or cross-page mmio needs to be broken up into separate
316 * exits for userspace servicing.
317 */
318 struct kvm_mmio_fragment {
319 gpa_t gpa;
320 void *data;
321 u64 val;
322 unsigned int len;
323 };
324
325 struct kvm_vcpu {
326 struct kvm *kvm;
327 #ifdef CONFIG_PREEMPT_NOTIFIERS
328 struct preempt_notifier preempt_notifier;
329 #endif
330 int cpu;
331 int vcpu_id; /* id given by userspace at creation */
332 int vcpu_idx; /* index into kvm->vcpu_array */
333 int ____srcu_idx; /* Don't use this directly. You've been warned. */
334 #ifdef CONFIG_PROVE_RCU
335 int srcu_depth;
336 #endif
337 int mode;
338 u64 requests;
339 unsigned long guest_debug;
340
341 struct mutex mutex;
342 struct kvm_run *run;
343
344 #ifndef __KVM_HAVE_ARCH_WQP
345 struct rcuwait wait;
346 #endif
347 struct pid *pid;
348 rwlock_t pid_lock;
349 int sigset_active;
350 sigset_t sigset;
351 unsigned int halt_poll_ns;
352 bool valid_wakeup;
353
354 #ifdef CONFIG_HAS_IOMEM
355 int mmio_needed;
356 int mmio_read_completed;
357 int mmio_is_write;
358 int mmio_cur_fragment;
359 int mmio_nr_fragments;
360 struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
361 #endif
362
363 #ifdef CONFIG_KVM_ASYNC_PF
364 struct {
365 u32 queued;
366 struct list_head queue;
367 struct list_head done;
368 spinlock_t lock;
369 } async_pf;
370 #endif
371
372 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
373 /*
374 	 * CPU relax intercept or pause loop exit optimization
375 	 * in_spin_loop: set when a vcpu does a pause loop exit
376 	 * or has its cpu relax intercepted.
377 * dy_eligible: indicates whether vcpu is eligible for directed yield.
378 */
379 struct {
380 bool in_spin_loop;
381 bool dy_eligible;
382 } spin_loop;
383 #endif
384 bool wants_to_run;
385 bool preempted;
386 bool ready;
387 bool scheduled_out;
388 struct kvm_vcpu_arch arch;
389 struct kvm_vcpu_stat stat;
390 char stats_id[KVM_STATS_NAME_SIZE];
391 struct kvm_dirty_ring dirty_ring;
392
393 /*
394 * The most recently used memslot by this vCPU and the slots generation
395 * for which it is valid.
396 * No wraparound protection is needed since generations won't overflow in
397 * thousands of years, even assuming 1M memslot operations per second.
398 */
399 struct kvm_memory_slot *last_used_slot;
400 u64 last_used_slot_gen;
401 };
402
403 /*
404 * Start accounting time towards a guest.
405 * Must be called before entering guest context.
406 */
407 static __always_inline void guest_timing_enter_irqoff(void)
408 {
409 /*
410 	 * This is running in ioctl context, so it's safe to assume that the
411 	 * pending cputime to flush is stime.
412 */
413 instrumentation_begin();
414 vtime_account_guest_enter();
415 instrumentation_end();
416 }
417
418 /*
419 * Enter guest context and enter an RCU extended quiescent state.
420 *
421 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
422 * unsafe to use any code which may directly or indirectly use RCU, tracing
423 * (including IRQ flag tracing), or lockdep. All code in this period must be
424 * non-instrumentable.
425 */
426 static __always_inline void guest_context_enter_irqoff(void)
427 {
428 /*
429 	 * KVM does not hold any references to RCU-protected data when it
430 	 * switches the CPU into guest mode. In fact, switching to guest mode
431 	 * is very similar to exiting to userspace from RCU's point of view. In
432 	 * addition, the CPU may stay in guest mode for quite a long time (up to
433 	 * one time slice). Let's treat guest mode as a quiescent state, just as
434 	 * we do with user-mode execution.
435 */
436 if (!context_tracking_guest_enter()) {
437 instrumentation_begin();
438 rcu_virt_note_context_switch();
439 instrumentation_end();
440 }
441 }
442
443 /*
444 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
445 * guest_state_enter_irqoff().
446 */
447 static __always_inline void guest_enter_irqoff(void)
448 {
449 guest_timing_enter_irqoff();
450 guest_context_enter_irqoff();
451 }
452
453 /**
454 * guest_state_enter_irqoff - Fixup state when entering a guest
455 *
456 * Entry to a guest will enable interrupts, but the kernel state is interrupts
457 * disabled when this is invoked. Also tell RCU about it.
458 *
459 * 1) Trace interrupts on state
460 * 2) Invoke context tracking if enabled to adjust RCU state
461 * 3) Tell lockdep that interrupts are enabled
462 *
463 * Invoked from architecture specific code before entering a guest.
464 * Must be called with interrupts disabled and the caller must be
465 * non-instrumentable.
466 * The caller has to invoke guest_timing_enter_irqoff() before this.
467 *
468 * Note: this is analogous to exit_to_user_mode().
469 */
470 static __always_inline void guest_state_enter_irqoff(void)
471 {
472 instrumentation_begin();
473 trace_hardirqs_on_prepare();
474 lockdep_hardirqs_on_prepare();
475 instrumentation_end();
476
477 guest_context_enter_irqoff();
478 lockdep_hardirqs_on(CALLER_ADDR0);
479 }
480
481 /*
482 * Exit guest context and exit an RCU extended quiescent state.
483 *
484 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
485 * unsafe to use any code which may directly or indirectly use RCU, tracing
486 * (including IRQ flag tracing), or lockdep. All code in this period must be
487 * non-instrumentable.
488 */
489 static __always_inline void guest_context_exit_irqoff(void)
490 {
491 /*
492 * Guest mode is treated as a quiescent state, see
493 * guest_context_enter_irqoff() for more details.
494 */
495 if (!context_tracking_guest_exit()) {
496 instrumentation_begin();
497 rcu_virt_note_context_switch();
498 instrumentation_end();
499 }
500 }
501
502 /*
503 * Stop accounting time towards a guest.
504 * Must be called after exiting guest context.
505 */
506 static __always_inline void guest_timing_exit_irqoff(void)
507 {
508 instrumentation_begin();
509 /* Flush the guest cputime we spent on the guest */
510 vtime_account_guest_exit();
511 instrumentation_end();
512 }
513
514 /*
515 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
516 * guest_timing_exit_irqoff().
517 */
518 static __always_inline void guest_exit_irqoff(void)
519 {
520 guest_context_exit_irqoff();
521 guest_timing_exit_irqoff();
522 }
523
524 static inline void guest_exit(void)
525 {
526 unsigned long flags;
527
528 local_irq_save(flags);
529 guest_exit_irqoff();
530 local_irq_restore(flags);
531 }
532
533 /**
534 * guest_state_exit_irqoff - Establish state when returning from guest mode
535 *
536 * Entry from a guest disables interrupts, but guest mode is traced as
537 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
538 *
539 * 1) Tell lockdep that interrupts are disabled
540 * 2) Invoke context tracking if enabled to reactivate RCU
541 * 3) Trace interrupts off state
542 *
543 * Invoked from architecture specific code after exiting a guest.
544 * Must be invoked with interrupts disabled and the caller must be
545 * non-instrumentable.
546 * The caller has to invoke guest_timing_exit_irqoff() after this.
547 *
548 * Note: this is analogous to enter_from_user_mode().
549 */
550 static __always_inline void guest_state_exit_irqoff(void)
551 {
552 lockdep_hardirqs_off(CALLER_ADDR0);
553 guest_context_exit_irqoff();
554
555 instrumentation_begin();
556 trace_hardirqs_off_finish();
557 instrumentation_end();
558 }
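/*
 * Illustrative sketch (assumption, not from the original header): the ordering
 * an architecture's vcpu_run() path is expected to follow with the timing and
 * state helpers above, per their kernel-doc.  "arch_vcpu_enter_exit()" stands
 * in for the arch-specific non-instrumentable entry routine.
 *
 *   local_irq_disable();
 *   guest_timing_enter_irqoff();     // start accounting time to the guest
 *   guest_state_enter_irqoff();      // IRQ tracing, context tracking, lockdep
 *   arch_vcpu_enter_exit(vcpu);      // actually run the guest
 *   guest_state_exit_irqoff();       // undo the above, RCU watches again
 *   guest_timing_exit_irqoff();      // flush guest cputime
 *   local_irq_enable();
 */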
559
560 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
561 {
562 /*
563 * The memory barrier ensures a previous write to vcpu->requests cannot
564 * be reordered with the read of vcpu->mode. It pairs with the general
565 * memory barrier following the write of vcpu->mode in VCPU RUN.
566 */
567 smp_mb__before_atomic();
568 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
569 }
570
571 /*
572  * Some of the bitops functions do not support overly long bitmaps.
573  * This number must be chosen so that such limits are not exceeded.
574 */
575 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
576
577 /*
578 * Since at idle each memslot belongs to two memslot sets it has to contain
579 * two embedded nodes for each data structure that it forms a part of.
580 *
581 * Two memslot sets (one active and one inactive) are necessary so the VM
582 * continues to run on one memslot set while the other is being modified.
583 *
584 * These two memslot sets normally point to the same set of memslots.
585 * They can, however, be desynchronized when performing a memslot management
586 * operation by replacing the memslot to be modified by its copy.
587 * After the operation is complete, both memslot sets once again point to
588 * the same, common set of memslot data.
589 *
590 * The memslots themselves are independent of each other so they can be
591 * individually added or deleted.
592 */
593 struct kvm_memory_slot {
594 struct hlist_node id_node[2];
595 struct interval_tree_node hva_node[2];
596 struct rb_node gfn_node[2];
597 gfn_t base_gfn;
598 unsigned long npages;
599 unsigned long *dirty_bitmap;
600 struct kvm_arch_memory_slot arch;
601 unsigned long userspace_addr;
602 u32 flags;
603 short id;
604 u16 as_id;
605
606 #ifdef CONFIG_KVM_GUEST_MEMFD
607 struct {
608 /*
609 * Writes protected by kvm->slots_lock. Acquiring a
610 * reference via kvm_gmem_get_file() is protected by
611 * either kvm->slots_lock or kvm->srcu.
612 */
613 struct file *file;
614 pgoff_t pgoff;
615 } gmem;
616 #endif
617 };
618
619 static inline bool kvm_slot_has_gmem(const struct kvm_memory_slot *slot)
620 {
621 return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
622 }
623
624 static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
625 {
626 return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
627 }
628
629 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
630 {
631 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
632 }
633
634 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
635 {
636 unsigned long len = kvm_dirty_bitmap_bytes(memslot);
637
638 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
639 }
640
641 #ifndef KVM_DIRTY_LOG_MANUAL_CAPS
642 #define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
643 #endif
644
645 struct kvm_s390_adapter_int {
646 u64 ind_addr;
647 u64 ind_gaddr;
648 u64 summary_addr;
649 u64 summary_gaddr;
650 u64 ind_offset;
651 u32 summary_offset;
652 u32 adapter_id;
653 };
654
655 struct kvm_hv_sint {
656 u32 vcpu;
657 u32 sint;
658 };
659
660 struct kvm_xen_evtchn {
661 u32 port;
662 u32 vcpu_id;
663 int vcpu_idx;
664 u32 priority;
665 };
666
667 struct kvm_kernel_irq_routing_entry {
668 u32 gsi;
669 u32 type;
670 int (*set)(struct kvm_kernel_irq_routing_entry *e,
671 struct kvm *kvm, int irq_source_id, int level,
672 bool line_status);
673 union {
674 struct {
675 unsigned irqchip;
676 unsigned pin;
677 } irqchip;
678 struct {
679 u32 address_lo;
680 u32 address_hi;
681 u32 data;
682 u32 flags;
683 u32 devid;
684 } msi;
685 struct kvm_s390_adapter_int adapter;
686 struct kvm_hv_sint hv_sint;
687 struct kvm_xen_evtchn xen_evtchn;
688 };
689 struct hlist_node link;
690 };
691
692 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
693 struct kvm_irq_routing_table {
694 int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
695 u32 nr_rt_entries;
696 /*
697 * Array indexed by gsi. Each entry contains list of irq chips
698 * the gsi is connected to.
699 */
700 struct hlist_head map[] __counted_by(nr_rt_entries);
701 };
702 #endif
703
704 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
705
706 #ifndef KVM_INTERNAL_MEM_SLOTS
707 #define KVM_INTERNAL_MEM_SLOTS 0
708 #endif
709
710 #define KVM_MEM_SLOTS_NUM SHRT_MAX
711 #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
712
713 #if KVM_MAX_NR_ADDRESS_SPACES == 1
714 static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
715 {
716 return KVM_MAX_NR_ADDRESS_SPACES;
717 }
718
719 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
720 {
721 return 0;
722 }
723 #endif
724
725 #ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
726 static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
727 {
728 return false;
729 }
730 #endif
731
732 #ifdef CONFIG_KVM_GUEST_MEMFD
733 bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
734
735 static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
736 {
737 u64 flags = GUEST_MEMFD_FLAG_MMAP;
738
739 if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
740 flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
741
742 return flags;
743 }
744 #endif
745
746 #ifndef kvm_arch_has_readonly_mem
747 static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
748 {
749 return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
750 }
751 #endif
752
753 struct kvm_memslots {
754 u64 generation;
755 atomic_long_t last_used_slot;
756 struct rb_root_cached hva_tree;
757 struct rb_root gfn_tree;
758 /*
759 * The mapping table from slot id to memslot.
760 *
761 * 7-bit bucket count matches the size of the old id to index array for
762 * 512 slots, while giving good performance with this slot count.
763 * Higher bucket counts bring only small performance improvements but
764 * always result in higher memory usage (even for lower memslot counts).
765 */
766 DECLARE_HASHTABLE(id_hash, 7);
767 int node_idx;
768 };
769
770 struct kvm {
771 #ifdef KVM_HAVE_MMU_RWLOCK
772 rwlock_t mmu_lock;
773 #else
774 spinlock_t mmu_lock;
775 #endif /* KVM_HAVE_MMU_RWLOCK */
776
777 struct mutex slots_lock;
778
779 /*
780 * Protects the arch-specific fields of struct kvm_memory_slots in
781 * use by the VM. To be used under the slots_lock (above) or in a
782 * kvm->srcu critical section where acquiring the slots_lock would
783 * lead to deadlock with the synchronize_srcu in
784 * kvm_swap_active_memslots().
785 */
786 struct mutex slots_arch_lock;
787 struct mm_struct *mm; /* userspace tied to this vm */
788 unsigned long nr_memslot_pages;
789 /* The two memslot sets - active and inactive (per address space) */
790 struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
791 /* The current active memslot set for each address space */
792 struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
793 struct xarray vcpu_array;
794 /*
795 * Protected by slots_lock, but can be read outside if an
796 * incorrect answer is acceptable.
797 */
798 atomic_t nr_memslots_dirty_logging;
799
800 /* Used to wait for completion of MMU notifiers. */
801 spinlock_t mn_invalidate_lock;
802 unsigned long mn_active_invalidate_count;
803 struct rcuwait mn_memslots_update_rcuwait;
804
805 /* For management / invalidation of gfn_to_pfn_caches */
806 spinlock_t gpc_lock;
807 struct list_head gpc_list;
808
809 /*
810 * created_vcpus is protected by kvm->lock, and is incremented
811 * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
812 * incremented after storing the kvm_vcpu pointer in vcpus,
813 * and is accessed atomically.
814 */
815 atomic_t online_vcpus;
816 int max_vcpus;
817 int created_vcpus;
818 int last_boosted_vcpu;
819 struct list_head vm_list;
820 struct mutex lock;
821 struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
822 #ifdef CONFIG_HAVE_KVM_IRQCHIP
823 struct {
824 spinlock_t lock;
825 struct list_head items;
826 /* resampler_list update side is protected by resampler_lock. */
827 struct list_head resampler_list;
828 struct mutex resampler_lock;
829 } irqfds;
830 #endif
831 struct list_head ioeventfds;
832 struct kvm_vm_stat stat;
833 struct kvm_arch arch;
834 refcount_t users_count;
835 #ifdef CONFIG_KVM_MMIO
836 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
837 spinlock_t ring_lock;
838 struct list_head coalesced_zones;
839 #endif
840
841 struct mutex irq_lock;
842 #ifdef CONFIG_HAVE_KVM_IRQCHIP
843 /*
844 * Update side is protected by irq_lock.
845 */
846 struct kvm_irq_routing_table __rcu *irq_routing;
847
848 struct hlist_head irq_ack_notifier_list;
849 #endif
850
851 struct mmu_notifier mmu_notifier;
852 unsigned long mmu_invalidate_seq;
853 long mmu_invalidate_in_progress;
854 gfn_t mmu_invalidate_range_start;
855 gfn_t mmu_invalidate_range_end;
856
857 struct list_head devices;
858 u64 manual_dirty_log_protect;
859 struct dentry *debugfs_dentry;
860 struct kvm_stat_data **debugfs_stat_data;
861 struct srcu_struct srcu;
862 struct srcu_struct irq_srcu;
863 pid_t userspace_pid;
864 bool override_halt_poll_ns;
865 unsigned int max_halt_poll_ns;
866 u32 dirty_ring_size;
867 bool dirty_ring_with_bitmap;
868 bool vm_bugged;
869 bool vm_dead;
870
871 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
872 struct notifier_block pm_notifier;
873 #endif
874 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
875 /* Protected by slots_lock (for writes) and RCU (for reads) */
876 struct xarray mem_attr_array;
877 #endif
878 char stats_id[KVM_STATS_NAME_SIZE];
879 };
880
881 #define kvm_err(fmt, ...) \
882 pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
883 #define kvm_info(fmt, ...) \
884 pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
885 #define kvm_debug(fmt, ...) \
886 pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
887 #define kvm_debug_ratelimited(fmt, ...) \
888 pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
889 ## __VA_ARGS__)
890 #define kvm_pr_unimpl(fmt, ...) \
891 pr_err_ratelimited("kvm [%i]: " fmt, \
892 task_tgid_nr(current), ## __VA_ARGS__)
893
894 /* The guest did something we don't support. */
895 #define vcpu_unimpl(vcpu, fmt, ...) \
896 kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
897 (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
898
899 #define vcpu_debug(vcpu, fmt, ...) \
900 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
901 #define vcpu_debug_ratelimited(vcpu, fmt, ...) \
902 kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
903 ## __VA_ARGS__)
904 #define vcpu_err(vcpu, fmt, ...) \
905 kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
906
907 static inline void kvm_vm_dead(struct kvm *kvm)
908 {
909 kvm->vm_dead = true;
910 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
911 }
912
913 static inline void kvm_vm_bugged(struct kvm *kvm)
914 {
915 kvm->vm_bugged = true;
916 kvm_vm_dead(kvm);
917 }
918
919
920 #define KVM_BUG(cond, kvm, fmt...) \
921 ({ \
922 bool __ret = !!(cond); \
923 \
924 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
925 kvm_vm_bugged(kvm); \
926 unlikely(__ret); \
927 })
928
929 #define KVM_BUG_ON(cond, kvm) \
930 ({ \
931 bool __ret = !!(cond); \
932 \
933 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
934 kvm_vm_bugged(kvm); \
935 unlikely(__ret); \
936 })
937
938 /*
939 * Note, "data corruption" refers to corruption of host kernel data structures,
940  * not guest data.  Guest data corruption, suspected or confirmed, that is
941  * tied to and contained within a single VM should *never* BUG() and
942  * potentially panic the host, i.e. use this variant of KVM_BUG() if and only
943  * if a KVM data structure is corrupted and that corruption can have a
944  * cascading effect on other parts of the host and/or on other VMs.
945 */
946 #define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
947 ({ \
948 bool __ret = !!(cond); \
949 \
950 if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
951 BUG_ON(__ret); \
952 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
953 kvm_vm_bugged(kvm); \
954 unlikely(__ret); \
955 })
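/*
 * Illustrative sketch (not from the original header): typical use of
 * KVM_BUG_ON() to bail out of an arch path when a KVM-internal invariant is
 * violated, marking the VM as bugged rather than crashing the host.  The
 * specific invariant shown here is hypothetical.
 *
 *   if (KVM_BUG_ON(!slot, vcpu->kvm))
 *           return -EIO;
 */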
956
957 static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
958 {
959 #ifdef CONFIG_PROVE_RCU
960 WARN_ONCE(vcpu->srcu_depth++,
961 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
962 #endif
963 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
964 }
965
966 static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
967 {
968 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
969
970 #ifdef CONFIG_PROVE_RCU
971 WARN_ONCE(--vcpu->srcu_depth,
972 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
973 #endif
974 }
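/*
 * Illustrative sketch (not from the original header): the usual pattern for
 * accessing memslot-protected state from a vCPU ioctl path, bracketed by the
 * SRCU read-lock helpers above.
 *
 *   kvm_vcpu_srcu_read_lock(vcpu);
 *   slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *   ... use the memslot ...
 *   kvm_vcpu_srcu_read_unlock(vcpu);
 */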
975
976 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
977 {
978 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
979 }
980
981 /*
982 * Get a bus reference under the update-side lock. No long-term SRCU reader
983 * references are permitted, to avoid stale reads vs concurrent IO
984 * registrations.
985 */
986 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
987 {
988 return rcu_dereference_protected(kvm->buses[idx],
989 lockdep_is_held(&kvm->slots_lock));
990 }
991
992 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
993 {
994 int num_vcpus = atomic_read(&kvm->online_vcpus);
995
996 /*
997 * Explicitly verify the target vCPU is online, as the anti-speculation
998 * logic only limits the CPU's ability to speculate, e.g. given a "bad"
999 * index, clamping the index to 0 would return vCPU0, not NULL.
1000 */
1001 if (i >= num_vcpus)
1002 return NULL;
1003
1004 i = array_index_nospec(i, num_vcpus);
1005
1006 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
1007 smp_rmb();
1008 return xa_load(&kvm->vcpu_array, i);
1009 }
1010
1011 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
1012 if (atomic_read(&kvm->online_vcpus)) \
1013 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
1014 (atomic_read(&kvm->online_vcpus) - 1))
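/*
 * Illustrative sketch (not from the original header): iterating over all
 * online vCPUs, e.g. to kick each of them out of guest mode.  The index must
 * be an unsigned long because the iterator is xarray-based.
 *
 *   unsigned long i;
 *   struct kvm_vcpu *vcpu;
 *
 *   kvm_for_each_vcpu(i, vcpu, kvm)
 *           kvm_vcpu_kick(vcpu);
 */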
1015
1016 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
1017 {
1018 struct kvm_vcpu *vcpu = NULL;
1019 unsigned long i;
1020
1021 if (id < 0)
1022 return NULL;
1023 if (id < KVM_MAX_VCPUS)
1024 vcpu = kvm_get_vcpu(kvm, id);
1025 if (vcpu && vcpu->vcpu_id == id)
1026 return vcpu;
1027 kvm_for_each_vcpu(i, vcpu, kvm)
1028 if (vcpu->vcpu_id == id)
1029 return vcpu;
1030 return NULL;
1031 }
1032
1033 static inline bool kvm_is_vcpu_creation_in_progress(struct kvm *kvm)
1034 {
1035 lockdep_assert_held(&kvm->lock);
1036
1037 return kvm->created_vcpus != atomic_read(&kvm->online_vcpus);
1038 }
1039
1040 void kvm_destroy_vcpus(struct kvm *kvm);
1041
1042 int kvm_trylock_all_vcpus(struct kvm *kvm);
1043 int kvm_lock_all_vcpus(struct kvm *kvm);
1044 void kvm_unlock_all_vcpus(struct kvm *kvm);
1045
1046 void vcpu_load(struct kvm_vcpu *vcpu);
1047 void vcpu_put(struct kvm_vcpu *vcpu);
1048
1049 #ifdef CONFIG_KVM_IOAPIC
1050 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
1051 #else
1052 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
1053 {
1054 }
1055 #endif
1056
1057 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1058 int kvm_irqfd_init(void);
1059 void kvm_irqfd_exit(void);
1060 #else
1061 static inline int kvm_irqfd_init(void)
1062 {
1063 return 0;
1064 }
1065
1066 static inline void kvm_irqfd_exit(void)
1067 {
1068 }
1069 #endif
1070 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
1071 void kvm_exit(void);
1072
1073 void kvm_get_kvm(struct kvm *kvm);
1074 bool kvm_get_kvm_safe(struct kvm *kvm);
1075 void kvm_put_kvm(struct kvm *kvm);
1076 bool file_is_kvm(struct file *file);
1077 void kvm_put_kvm_no_destroy(struct kvm *kvm);
1078
1079 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
1080 {
1081 as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
1082 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
1083 lockdep_is_held(&kvm->slots_lock) ||
1084 !refcount_read(&kvm->users_count));
1085 }
1086
1087 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
1088 {
1089 return __kvm_memslots(kvm, 0);
1090 }
1091
1092 static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
1093 {
1094 int as_id = kvm_arch_vcpu_memslots_id(vcpu);
1095
1096 return __kvm_memslots(vcpu->kvm, as_id);
1097 }
1098
1099 static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
1100 {
1101 return RB_EMPTY_ROOT(&slots->gfn_tree);
1102 }
1103
1104 bool kvm_are_all_memslots_empty(struct kvm *kvm);
1105
1106 #define kvm_for_each_memslot(memslot, bkt, slots) \
1107 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
1108 if (WARN_ON_ONCE(!memslot->npages)) { \
1109 } else
1110
1111 static inline
1112 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
1113 {
1114 struct kvm_memory_slot *slot;
1115 int idx = slots->node_idx;
1116
1117 hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
1118 if (slot->id == id)
1119 return slot;
1120 }
1121
1122 return NULL;
1123 }
1124
1125 /* Iterator used for walking memslots that overlap a gfn range. */
1126 struct kvm_memslot_iter {
1127 struct kvm_memslots *slots;
1128 struct rb_node *node;
1129 struct kvm_memory_slot *slot;
1130 };
1131
1132 static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
1133 {
1134 iter->node = rb_next(iter->node);
1135 if (!iter->node)
1136 return;
1137
1138 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
1139 }
1140
1141 static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
1142 struct kvm_memslots *slots,
1143 gfn_t start)
1144 {
1145 int idx = slots->node_idx;
1146 struct rb_node *tmp;
1147 struct kvm_memory_slot *slot;
1148
1149 iter->slots = slots;
1150
1151 /*
1152 * Find the so called "upper bound" of a key - the first node that has
1153 * its key strictly greater than the searched one (the start gfn in our case).
1154 */
1155 iter->node = NULL;
1156 for (tmp = slots->gfn_tree.rb_node; tmp; ) {
1157 slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
1158 if (start < slot->base_gfn) {
1159 iter->node = tmp;
1160 tmp = tmp->rb_left;
1161 } else {
1162 tmp = tmp->rb_right;
1163 }
1164 }
1165
1166 /*
1167 * Find the slot with the lowest gfn that can possibly intersect with
1168 * the range, so we'll ideally have slot start <= range start
1169 */
1170 if (iter->node) {
1171 /*
1172 * A NULL previous node means that the very first slot
1173 * already has a higher start gfn.
1174 * In this case slot start > range start.
1175 */
1176 tmp = rb_prev(iter->node);
1177 if (tmp)
1178 iter->node = tmp;
1179 } else {
1180 /* a NULL node below means no slots */
1181 iter->node = rb_last(&slots->gfn_tree);
1182 }
1183
1184 if (iter->node) {
1185 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
1186
1187 /*
1188 * It is possible in the slot start < range start case that the
1189 * found slot ends before or at range start (slot end <= range start)
1190 * and so it does not overlap the requested range.
1191 *
1192 * In such non-overlapping case the next slot (if it exists) will
1193 * already have slot start > range start, otherwise the logic above
1194 * would have found it instead of the current slot.
1195 */
1196 if (iter->slot->base_gfn + iter->slot->npages <= start)
1197 kvm_memslot_iter_next(iter);
1198 }
1199 }
1200
1201 static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
1202 {
1203 if (!iter->node)
1204 return false;
1205
1206 /*
1207 	 * If this slot starts at or beyond the end of the range, so does
1208 	 * every subsequent one.
1209 */
1210 return iter->slot->base_gfn < end;
1211 }
1212
1213 /* Iterate over each memslot at least partially intersecting [start, end) range */
1214 #define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
1215 for (kvm_memslot_iter_start(iter, slots, start); \
1216 kvm_memslot_iter_is_valid(iter, end); \
1217 kvm_memslot_iter_next(iter))
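/*
 * Illustrative sketch (not from the original header): walking every memslot
 * that overlaps a gfn range, e.g. from an invalidation-style handler.
 *
 *   struct kvm_memslot_iter iter;
 *
 *   kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *           struct kvm_memory_slot *slot = iter.slot;
 *
 *           ... process the part of 'slot' that overlaps [start, end) ...
 *   }
 */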
1218
1219 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1220 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
1221 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
1222
1223 /*
1224 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
1225 * - create a new memory slot
1226 * - delete an existing memory slot
1227 * - modify an existing memory slot
1228 * -- move it in the guest physical memory space
1229 * -- just change its flags
1230 *
1231 * Since flags can be changed by some of these operations, the following
1232 * differentiation is the best we can do for kvm_set_memory_region():
1233 */
1234 enum kvm_mr_change {
1235 KVM_MR_CREATE,
1236 KVM_MR_DELETE,
1237 KVM_MR_MOVE,
1238 KVM_MR_FLAGS_ONLY,
1239 };
1240
1241 int kvm_set_internal_memslot(struct kvm *kvm,
1242 const struct kvm_userspace_memory_region2 *mem);
1243 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1244 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1245 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1246 const struct kvm_memory_slot *old,
1247 struct kvm_memory_slot *new,
1248 enum kvm_mr_change change);
1249 void kvm_arch_commit_memory_region(struct kvm *kvm,
1250 struct kvm_memory_slot *old,
1251 const struct kvm_memory_slot *new,
1252 enum kvm_mr_change change);
1253 /* flush all memory translations */
1254 void kvm_arch_flush_shadow_all(struct kvm *kvm);
1255 /* flush memory translations pointing to 'slot' */
1256 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1257 struct kvm_memory_slot *slot);
1258
1259 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
1260 struct page **pages, int nr_pages);
1261
1262 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
1263 static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1264 {
1265 return __gfn_to_page(kvm, gfn, true);
1266 }
1267
1268 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1269 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1270 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1271 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1272 bool *writable);
1273
1274 static inline void kvm_release_page_unused(struct page *page)
1275 {
1276 if (!page)
1277 return;
1278
1279 put_page(page);
1280 }
1281
1282 void kvm_release_page_clean(struct page *page);
1283 void kvm_release_page_dirty(struct page *page);
1284
1285 static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
1286 bool unused, bool dirty)
1287 {
1288 lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
1289
1290 if (!page)
1291 return;
1292
1293 /*
1294 * If the page that KVM got from the *primary MMU* is writable, and KVM
1295 * installed or reused a SPTE, mark the page/folio dirty. Note, this
1296 * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
1297 * the GFN is write-protected. Folios can't be safely marked dirty
1298 * outside of mmu_lock as doing so could race with writeback on the
1299 * folio. As a result, KVM can't mark folios dirty in the fast page
1300 * fault handler, and so KVM must (somewhat) speculatively mark the
1301 * folio dirty if KVM could locklessly make the SPTE writable.
1302 */
1303 if (unused)
1304 kvm_release_page_unused(page);
1305 else if (dirty)
1306 kvm_release_page_dirty(page);
1307 else
1308 kvm_release_page_clean(page);
1309 }
1310
1311 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
1312 unsigned int foll, bool *writable,
1313 struct page **refcounted_page);
1314
1315 static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
1316 bool write, bool *writable,
1317 struct page **refcounted_page)
1318 {
1319 return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
1320 write ? FOLL_WRITE : 0, writable, refcounted_page);
1321 }
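/*
 * Illustrative sketch (assumption, not from the original header): how an arch
 * page-fault handler pairs kvm_faultin_pfn() with kvm_release_faultin_page()
 * under mmu_lock.  Error handling and the actual SPTE installation are elided.
 *
 *   struct page *page;
 *   bool writable;
 *   kvm_pfn_t pfn = kvm_faultin_pfn(vcpu, gfn, true, &writable, &page);
 *
 *   ... check pfn for errors, then install the mapping under kvm->mmu_lock ...
 *
 *   kvm_release_faultin_page(vcpu->kvm, page, false, writable);
 */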
1322
1323 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1324 int len);
1325 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1326 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1327 void *data, unsigned long len);
1328 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1329 void *data, unsigned int offset,
1330 unsigned long len);
1331 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1332 int offset, int len);
1333 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1334 unsigned long len);
1335 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1336 void *data, unsigned long len);
1337 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1338 void *data, unsigned int offset,
1339 unsigned long len);
1340 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1341 gpa_t gpa, unsigned long len);
1342
1343 #define __kvm_get_guest(kvm, gfn, offset, v) \
1344 ({ \
1345 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1346 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1347 int __ret = -EFAULT; \
1348 \
1349 if (!kvm_is_error_hva(__addr)) \
1350 __ret = get_user(v, __uaddr); \
1351 __ret; \
1352 })
1353
1354 #define kvm_get_guest(kvm, gpa, v) \
1355 ({ \
1356 gpa_t __gpa = gpa; \
1357 struct kvm *__kvm = kvm; \
1358 \
1359 __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
1360 offset_in_page(__gpa), v); \
1361 })
1362
1363 #define __kvm_put_guest(kvm, gfn, offset, v) \
1364 ({ \
1365 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1366 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1367 int __ret = -EFAULT; \
1368 \
1369 if (!kvm_is_error_hva(__addr)) \
1370 __ret = put_user(v, __uaddr); \
1371 if (!__ret) \
1372 mark_page_dirty(kvm, gfn); \
1373 __ret; \
1374 })
1375
1376 #define kvm_put_guest(kvm, gpa, v) \
1377 ({ \
1378 gpa_t __gpa = gpa; \
1379 struct kvm *__kvm = kvm; \
1380 \
1381 __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
1382 offset_in_page(__gpa), v); \
1383 })
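/*
 * Illustrative sketch (not from the original header): reading and updating a
 * single guest variable by gpa with the helpers above.  'gpa' and the bit
 * being set are hypothetical.
 *
 *   u32 val;
 *
 *   if (kvm_get_guest(kvm, gpa, val))
 *           return -EFAULT;
 *   val |= BIT(0);
 *   if (kvm_put_guest(kvm, gpa, val))
 *           return -EFAULT;
 */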
1384
1385 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1386 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1387 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1388 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
1389 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1390 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1391 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
1392
1393 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
1394 bool writable);
1395 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
1396
1397 static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
1398 struct kvm_host_map *map)
1399 {
1400 return __kvm_vcpu_map(vcpu, gpa, map, true);
1401 }
1402
1403 static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
1404 struct kvm_host_map *map)
1405 {
1406 return __kvm_vcpu_map(vcpu, gpa, map, false);
1407 }
1408
1409 static inline void kvm_vcpu_map_mark_dirty(struct kvm_vcpu *vcpu,
1410 struct kvm_host_map *map)
1411 {
1412 if (kvm_vcpu_mapped(map))
1413 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
1414 }
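/*
 * Illustrative sketch (not from the original header): temporarily mapping a
 * guest page to modify it from the host, then marking it dirty and unmapping.
 * 'gpa', 'data' and 'len' are hypothetical.
 *
 *   struct kvm_host_map map;
 *
 *   if (kvm_vcpu_map(vcpu, gpa, &map))
 *           return -EFAULT;
 *   memcpy(map.hva + offset_in_page(gpa), data, len);
 *   kvm_vcpu_map_mark_dirty(vcpu, &map);
 *   kvm_vcpu_unmap(vcpu, &map);
 */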
1415
1416 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
1417 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
1418 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
1419 int len);
1420 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1421 unsigned long len);
1422 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1423 unsigned long len);
1424 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
1425 int offset, int len);
1426 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
1427 unsigned long len);
1428
1429 /**
1430 * kvm_gpc_init - initialize gfn_to_pfn_cache.
1431 *
1432 * @gpc: struct gfn_to_pfn_cache object.
1433 * @kvm: pointer to kvm instance.
1434 *
1435 * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
1436 * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
1437 * the caller before init).
1438 */
1439 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
1440
1441 /**
1442 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
1443 * physical address.
1444 *
1445 * @gpc: struct gfn_to_pfn_cache object.
1446 * @gpa: guest physical address to map.
1447  * @len:	   sanity check; the range being accessed must fit within a single page.
1448 *
1449 * @return: 0 for success.
1450 * -EINVAL for a mapping which would cross a page boundary.
1451 * -EFAULT for an untranslatable guest physical address.
1452 *
1453 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
1454 * invalidations to be processed. Callers are required to use kvm_gpc_check()
1455 * to ensure that the cache is valid before accessing the target page.
1456 */
1457 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
1458
1459 /**
1460 * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
1461 *
1462 * @gpc: struct gfn_to_pfn_cache object.
1463 * @hva: userspace virtual address to map.
1464  * @len:	   sanity check; the range being accessed must fit within a single page.
1465 *
1466 * @return: 0 for success.
1467 * -EINVAL for a mapping which would cross a page boundary.
1468 * -EFAULT for an untranslatable guest physical address.
1469 *
1470 * The semantics of this function are the same as those of kvm_gpc_activate(). It
1471 * merely bypasses a layer of address translation.
1472 */
1473 int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
1474
1475 /**
1476 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
1477 *
1478 * @gpc: struct gfn_to_pfn_cache object.
1479  * @len:	   sanity check; the range being accessed must fit within a single page.
1480 *
1481 * @return: %true if the cache is still valid and the address matches.
1482 * %false if the cache is not valid.
1483 *
1484 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
1485 * while calling this function, and then continue to hold the lock until the
1486 * access is complete.
1487 *
1488 * Callers in IN_GUEST_MODE may do so without locking, although they should
1489  * still hold a read lock on kvm->srcu for the memslot checks.
1490 */
1491 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
1492
1493 /**
1494 * kvm_gpc_refresh - update a previously initialized cache.
1495 *
1496 * @gpc: struct gfn_to_pfn_cache object.
1497  * @len:	   sanity check; the range being accessed must fit within a single page.
1498 *
1499 * @return: 0 for success.
1500 * -EINVAL for a mapping which would cross a page boundary.
1501 * -EFAULT for an untranslatable guest physical address.
1502 *
1503 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
1504 * return from this function does not mean the page can be immediately
1505 * accessed because it may have raced with an invalidation. Callers must
1506 * still lock and check the cache status, as this function does not return
1507 * with the lock still held to permit access.
1508 */
1509 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
1510
1511 /**
1512 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
1513 *
1514 * @gpc: struct gfn_to_pfn_cache object.
1515 *
1516 * This removes a cache from the VM's list to be processed on MMU notifier
1517 * invocation.
1518 */
1519 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
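/*
 * Illustrative sketch (assumption, not from the original header): the
 * activate/check/refresh pattern for a gfn_to_pfn_cache described by the
 * kernel-doc above.  It assumes gpc->lock is the cache's rwlock and gpc->khva
 * is the kernel mapping of the cached page.
 *
 *   kvm_gpc_init(gpc, kvm);
 *   if (kvm_gpc_activate(gpc, gpa, sizeof(u64)))
 *           return -EFAULT;
 *
 *   read_lock(&gpc->lock);
 *   while (!kvm_gpc_check(gpc, sizeof(u64))) {
 *           read_unlock(&gpc->lock);
 *           if (kvm_gpc_refresh(gpc, sizeof(u64)))
 *                   return -EFAULT;
 *           read_lock(&gpc->lock);
 *   }
 *   ... access the page via gpc->khva while holding the lock ...
 *   read_unlock(&gpc->lock);
 */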
1520
1521 static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
1522 {
1523 return gpc->active && !kvm_is_error_gpa(gpc->gpa);
1524 }
1525
1526 static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
1527 {
1528 return gpc->active && kvm_is_error_gpa(gpc->gpa);
1529 }
1530
1531 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
1532 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
1533
1534 void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1535 bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
1536 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
1537 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
1538 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
1539
1540 #ifndef CONFIG_S390
1541 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
1542
1543 static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1544 {
1545 __kvm_vcpu_kick(vcpu, false);
1546 }
1547 #endif
1548
1549 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
1550 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
1551
1552 void kvm_flush_remote_tlbs(struct kvm *kvm);
1553 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1554 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
1555 const struct kvm_memory_slot *memslot);
1556
1557 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
1558 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
1559 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
1560 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
1561 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
1562 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
1563 #endif
1564
1565 void kvm_mmu_invalidate_begin(struct kvm *kvm);
1566 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
1567 void kvm_mmu_invalidate_end(struct kvm *kvm);
1568 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
1569
1570 long kvm_arch_dev_ioctl(struct file *filp,
1571 unsigned int ioctl, unsigned long arg);
1572 long kvm_arch_vcpu_ioctl(struct file *filp,
1573 unsigned int ioctl, unsigned long arg);
1574 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
1575 unsigned int ioctl, unsigned long arg);
1576 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
1577
1578 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1579
1580 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1581 struct kvm_memory_slot *slot,
1582 gfn_t gfn_offset,
1583 unsigned long mask);
1584 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1585
1586 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1587 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1588 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1589 int *is_dirty, struct kvm_memory_slot **memslot);
1590 #endif
1591
1592 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1593 bool line_status);
1594 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1595 struct kvm_enable_cap *cap);
1596 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
1597 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
1598 unsigned long arg);
1599
1600 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1601 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1602
1603 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1604 struct kvm_translation *tr);
1605
1606 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1607 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1608 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1609 struct kvm_sregs *sregs);
1610 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1611 struct kvm_sregs *sregs);
1612 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1613 struct kvm_mp_state *mp_state);
1614 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1615 struct kvm_mp_state *mp_state);
1616 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1617 struct kvm_guest_debug *dbg);
1618 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
1619
1620 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
1621 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
1622 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1623 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
1624 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
1625 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
1626
1627 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
1628 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1629 #endif
1630
1631 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
1632 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
1633 #else
1634 static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
1635 #endif
1636
1637 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
1638 /*
1639 * kvm_arch_shutdown() is invoked immediately prior to forcefully disabling
1640 * hardware virtualization on all CPUs via IPI function calls (in preparation
1641 * for shutdown or reboot), e.g. to allow arch code to prepare for disabling
1642 * virtualization while KVM may be actively running vCPUs.
1643 */
1644 void kvm_arch_shutdown(void);
1645 /*
1646 * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
1647 * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
1648 * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
1649 * sequence, and at the end of the generic hardware disabling sequence.
1650 */
1651 void kvm_arch_enable_virtualization(void);
1652 void kvm_arch_disable_virtualization(void);
1653 /*
1654 * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
1655 * do the actual twiddling of hardware bits. The hooks are called on all
1656  * online CPUs when KVM enables/disables virtualization, and on a single CPU
1657 * when that CPU is onlined/offlined (including for Resume/Suspend).
1658 */
1659 int kvm_arch_enable_virtualization_cpu(void);
1660 void kvm_arch_disable_virtualization_cpu(void);
1661 #endif
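
/*
 * Illustrative sketch (not the actual virt/kvm/kvm_main.c code) of how the
 * hooks above are expected to be ordered on a 0=>1 kvm_usage_count
 * transition; kvm_online_cpu() is a hypothetical per-CPU callback name and
 * the locking/error handling is simplified:
 *
 *	mutex_lock(&kvm_usage_lock);
 *	if (!kvm_usage_count++) {
 *		kvm_arch_enable_virtualization();
 *		for_each_online_cpu(cpu)
 *			smp_call_on_cpu(cpu, kvm_online_cpu, NULL, true);
 *	}
 *	mutex_unlock(&kvm_usage_lock);
 *
 * where kvm_online_cpu() invokes kvm_arch_enable_virtualization_cpu() on the
 * target CPU; the 1=>0 path mirrors this with the disable hooks.
 */
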
1662 bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
1663 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1664 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1665 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1666 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1667 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
1668 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
1669 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1670 void kvm_arch_create_vm_debugfs(struct kvm *kvm);
1671
1672 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
1673 /*
1674 * All architectures that want to use vzalloc currently also
1675 * need their own kvm_arch_alloc_vm implementation.
1676 */
1677 static inline struct kvm *kvm_arch_alloc_vm(void)
1678 {
1679 return kzalloc_obj(struct kvm, GFP_KERNEL_ACCOUNT);
1680 }
1681 #endif
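
/*
 * A minimal sketch of the arch override alluded to above, for an architecture
 * whose struct kvm is too large for kzalloc and therefore uses vzalloc; the
 * exact body is an illustrative assumption, not any specific architecture's
 * implementation:
 *
 *	// in asm/kvm_host.h
 *	#define __KVM_HAVE_ARCH_VM_ALLOC
 *
 *	// in arch code
 *	struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return __vmalloc(sizeof(struct kvm),
 *				 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	}
 */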
1682
1683 static inline void __kvm_arch_free_vm(struct kvm *kvm)
1684 {
1685 kvfree(kvm);
1686 }
1687
1688 #ifndef __KVM_HAVE_ARCH_VM_FREE
1689 static inline void kvm_arch_free_vm(struct kvm *kvm)
1690 {
1691 __kvm_arch_free_vm(kvm);
1692 }
1693 #endif
1694
1695 #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
1696 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
1697 {
1698 return -ENOTSUPP;
1699 }
1700 #else
1701 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
1702 #endif
1703
1704 #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1705 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
1706 gfn_t gfn, u64 nr_pages)
1707 {
1708 return -EOPNOTSUPP;
1709 }
1710 #else
1711 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1712 #endif
1713
1714 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
1715 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1716 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1717 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1718 #else
1719 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
1720 {
1721 }
1722
1723 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
1724 {
1725 }
1726
1727 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
1728 {
1729 return false;
1730 }
1731 #endif
1732
1733 static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
1734 {
1735 #ifdef __KVM_HAVE_ARCH_WQP
1736 return vcpu->arch.waitp;
1737 #else
1738 return &vcpu->wait;
1739 #endif
1740 }
1741
1742 /*
1743 * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
1744 * true if the vCPU was blocking and was awakened, false otherwise.
1745 */
1746 static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
1747 {
1748 return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
1749 }
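
/*
 * For contrast with the "no stats updates" note above, a stats-updating
 * wrapper would look roughly like the sketch below (hedged; the canonical
 * kvm_vcpu_wake_up() lives in virt/kvm/kvm_main.c and may differ):
 *
 *	bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 *	{
 *		if (__kvm_vcpu_wake_up(vcpu)) {
 *			++vcpu->stat.generic.halt_wakeup;
 *			return true;
 *		}
 *		return false;
 *	}
 */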
1750
1751 static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
1752 {
1753 return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
1754 }
1755
1756 #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
1757 /*
1758 * returns true if the virtual interrupt controller is initialized and
1759  * ready to accept virtual IRQs. On some architectures the virtual interrupt
1760 * controller is dynamically instantiated and this is not always true.
1761 */
1762 bool kvm_arch_intc_initialized(struct kvm *kvm);
1763 #else
1764 static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
1765 {
1766 return true;
1767 }
1768 #endif
1769
1770 #ifdef CONFIG_GUEST_PERF_EVENTS
1771 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
1772
1773 void __kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void),
1774 void (*mediated_pmi_handler)(void));
1775
1776 static inline void kvm_register_perf_callbacks(void)
1777 {
1778 __kvm_register_perf_callbacks(NULL, NULL);
1779 }
1780
1781 void kvm_unregister_perf_callbacks(void);
1782 #else
1783 static inline void kvm_register_perf_callbacks(void) {}
1784 static inline void kvm_unregister_perf_callbacks(void) {}
1785 #endif /* CONFIG_GUEST_PERF_EVENTS */
1786
1787 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1788 void kvm_arch_destroy_vm(struct kvm *kvm);
1789
1790 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
1791
1792 struct kvm_irq_ack_notifier {
1793 struct hlist_node link;
1794 unsigned gsi;
1795 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
1796 };
1797
1798 int kvm_irq_map_gsi(struct kvm *kvm,
1799 struct kvm_kernel_irq_routing_entry *entries, int gsi);
1800 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1801
1802 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1803 bool line_status);
1804 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1805 int irq_source_id, int level, bool line_status);
1806 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
1807 struct kvm *kvm, int irq_source_id,
1808 int level, bool line_status);
1809 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1810 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1811 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1812 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1813 struct kvm_irq_ack_notifier *kian);
1814 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1815 struct kvm_irq_ack_notifier *kian);
1816 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1817
1818 /*
1819 * Returns a pointer to the memslot if it contains gfn.
1820 * Otherwise returns NULL.
1821 */
1822 static inline struct kvm_memory_slot *
1823 try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1824 {
1825 if (!slot)
1826 return NULL;
1827
1828 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
1829 return slot;
1830 else
1831 return NULL;
1832 }
1833
1834 /*
1835 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1836 *
1837  * With "approx" set, a memslot is returned even when the address falls
1838  * in a hole between memslots; in that case, one of the memslots bordering
1839  * the hole is returned.
1840 */
1841 static inline struct kvm_memory_slot *
1842 search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1843 {
1844 struct kvm_memory_slot *slot;
1845 struct rb_node *node;
1846 int idx = slots->node_idx;
1847
1848 slot = NULL;
1849 for (node = slots->gfn_tree.rb_node; node; ) {
1850 slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
1851 if (gfn >= slot->base_gfn) {
1852 if (gfn < slot->base_gfn + slot->npages)
1853 return slot;
1854 node = node->rb_right;
1855 } else
1856 node = node->rb_left;
1857 }
1858
1859 return approx ? slot : NULL;
1860 }
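
/*
 * Worked example of the "approx" semantics: with memslots covering GFN ranges
 * [0x100, 0x200) and [0x300, 0x400), looking up GFN 0x250 returns NULL when
 * approx == false, but returns one of the two bordering memslots (the last
 * node visited by the binary search) when approx == true.
 */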
1861
1862 static inline struct kvm_memory_slot *
1863 ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1864 {
1865 struct kvm_memory_slot *slot;
1866
1867 slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
1868 slot = try_get_memslot(slot, gfn);
1869 if (slot)
1870 return slot;
1871
1872 slot = search_memslots(slots, gfn, approx);
1873 if (slot) {
1874 atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
1875 return slot;
1876 }
1877
1878 return NULL;
1879 }
1880
1881 /*
1882 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
1883 * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
1884 * because that would bloat other code too much.
1885 */
1886 static inline struct kvm_memory_slot *
1887 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1888 {
1889 return ____gfn_to_memslot(slots, gfn, false);
1890 }
1891
1892 static inline unsigned long
1893 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
1894 {
1895 /*
1896 * The index was checked originally in search_memslots. To avoid
1897 * that a malicious guest builds a Spectre gadget out of e.g. page
1898 * table walks, do not let the processor speculate loads outside
1899 * the guest's registered memslots.
1900 */
1901 unsigned long offset = gfn - slot->base_gfn;
1902 offset = array_index_nospec(offset, slot->npages);
1903 return slot->userspace_addr + offset * PAGE_SIZE;
1904 }
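
/*
 * Worked example: for a slot with base_gfn == 0x100, npages == 0x80 and
 * userspace_addr == 0x7f0000000000, __gfn_to_hva_memslot(slot, 0x105) yields
 * 0x7f0000000000 + 5 * PAGE_SIZE, i.e. 0x7f0000005000 with 4KiB pages.
 */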
1905
1906 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1907 {
1908 return gfn_to_memslot(kvm, gfn)->id;
1909 }
1910
1911 static inline gfn_t
1912 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
1913 {
1914 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1915
1916 return slot->base_gfn + gfn_offset;
1917 }
1918
1919 static inline gpa_t gfn_to_gpa(gfn_t gfn)
1920 {
1921 return (gpa_t)gfn << PAGE_SHIFT;
1922 }
1923
1924 static inline gfn_t gpa_to_gfn(gpa_t gpa)
1925 {
1926 return (gfn_t)(gpa >> PAGE_SHIFT);
1927 }
1928
1929 static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
1930 {
1931 return (hpa_t)pfn << PAGE_SHIFT;
1932 }
1933
1934 static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
1935 {
1936 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1937
1938 return !kvm_is_error_hva(hva);
1939 }
1940
1941 static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
1942 {
1943 lockdep_assert_held(&gpc->lock);
1944
1945 if (!gpc->memslot)
1946 return;
1947
1948 mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
1949 }
1950
1951 enum kvm_stat_kind {
1952 KVM_STAT_VM,
1953 KVM_STAT_VCPU,
1954 };
1955
1956 struct kvm_stat_data {
1957 struct kvm *kvm;
1958 const struct kvm_stats_desc *desc;
1959 enum kvm_stat_kind kind;
1960 };
1961
1962 #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
1963 .flags = type | unit | base | \
1964 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
1965 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
1966 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
1967 .exponent = exp, \
1968 .size = sz, \
1969 .bucket_size = bsz
1970
1971 #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1972 { \
1973 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1974 .offset = offsetof(struct kvm_vm_stat, generic.stat), \
1975 .name = #stat, \
1976 }
1977 #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1978 { \
1979 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1980 .offset = offsetof(struct kvm_vcpu_stat, generic.stat), \
1981 .name = #stat, \
1982 }
1983 #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1984 { \
1985 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1986 .offset = offsetof(struct kvm_vm_stat, stat), \
1987 .name = #stat, \
1988 }
1989 #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1990 { \
1991 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1992 .offset = offsetof(struct kvm_vcpu_stat, stat), \
1993 .name = #stat, \
1994 }
1995 /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
1996 #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
1997 SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
1998
1999 #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
2000 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
2001 unit, base, exponent, 1, 0)
2002 #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
2003 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
2004 unit, base, exponent, 1, 0)
2005 #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
2006 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
2007 unit, base, exponent, 1, 0)
2008 #define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
2009 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
2010 unit, base, exponent, sz, bsz)
2011 #define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
2012 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
2013 unit, base, exponent, sz, 0)
2014
2015 /* Cumulative counter, read/write */
2016 #define STATS_DESC_COUNTER(SCOPE, name) \
2017 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
2018 KVM_STATS_BASE_POW10, 0)
2019 /* Instantaneous counter, read only */
2020 #define STATS_DESC_ICOUNTER(SCOPE, name) \
2021 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
2022 KVM_STATS_BASE_POW10, 0)
2023 /* Peak counter, read/write */
2024 #define STATS_DESC_PCOUNTER(SCOPE, name) \
2025 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
2026 KVM_STATS_BASE_POW10, 0)
2027
2028 /* Instantaneous boolean value, read only */
2029 #define STATS_DESC_IBOOLEAN(SCOPE, name) \
2030 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
2031 KVM_STATS_BASE_POW10, 0)
2032 /* Peak (sticky) boolean value, read/write */
2033 #define STATS_DESC_PBOOLEAN(SCOPE, name) \
2034 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
2035 KVM_STATS_BASE_POW10, 0)
2036
2037 /* Cumulative time in nanosecond */
2038 #define STATS_DESC_TIME_NSEC(SCOPE, name) \
2039 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2040 KVM_STATS_BASE_POW10, -9)
2041 /* Linear histogram for time in nanosecond */
2042 #define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
2043 STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2044 KVM_STATS_BASE_POW10, -9, sz, bsz)
2045 /* Logarithmic histogram for time in nanosecond */
2046 #define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
2047 STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2048 KVM_STATS_BASE_POW10, -9, sz)
2049
2050 #define KVM_GENERIC_VM_STATS() \
2051 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
2052 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
2053
2054 #define KVM_GENERIC_VCPU_STATS() \
2055 STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
2056 STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
2057 STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
2058 STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
2059 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
2060 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
2061 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
2062 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
2063 HALT_POLL_HIST_COUNT), \
2064 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
2065 HALT_POLL_HIST_COUNT), \
2066 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
2067 HALT_POLL_HIST_COUNT), \
2068 STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
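
/*
 * Hedged sketch of how an architecture typically combines the generic stats
 * with its own when defining the descriptor array declared below; the "exits"
 * counter and its presence in struct kvm_vcpu_stat are assumptions for
 * illustration only:
 *
 *	const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
 *		KVM_GENERIC_VCPU_STATS(),
 *		STATS_DESC_COUNTER(VCPU, exits),
 *	};
 */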
2069
2070 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
2071 const struct kvm_stats_desc *desc,
2072 void *stats, size_t size_stats,
2073 char __user *user_buffer, size_t size, loff_t *offset);
2074
2075 /**
2076 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
2077 * statistics data.
2078 *
2079 * @data: start address of the stats data
2080  * @size: the number of buckets in the stats data
2081 * @value: the new value used to update the linear histogram's bucket
2082 * @bucket_size: the size (width) of a bucket
2083 */
2084 static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
2085 u64 value, size_t bucket_size)
2086 {
2087 size_t index = div64_u64(value, bucket_size);
2088
2089 index = min(index, size - 1);
2090 ++data[index];
2091 }
2092
2093 /**
2094 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
2095 * statistics data.
2096 *
2097 * @data: start address of the stats data
2098  * @size: the number of buckets in the stats data
2099 * @value: the new value used to update the logarithmic histogram's bucket
2100 */
2101 static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
2102 {
2103 size_t index = fls64(value);
2104
2105 index = min(index, size - 1);
2106 ++data[index];
2107 }
2108
2109 #define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
2110 kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
2111 #define KVM_STATS_LOG_HIST_UPDATE(array, value) \
2112 kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
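
/*
 * Worked examples for the helpers above: with bucket_size == 1000, a value of
 * 2500 lands in linear bucket index 2 (div64_u64(2500, 1000) == 2), while the
 * same value lands in logarithmic bucket index 12 (fls64(2500) == 12, since
 * 2^11 <= 2500 < 2^12); either index is then clamped to size - 1.
 */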
2113
2114
2115 extern const struct kvm_stats_header kvm_vm_stats_header;
2116 extern const struct kvm_stats_desc kvm_vm_stats_desc[];
2117 extern const struct kvm_stats_header kvm_vcpu_stats_header;
2118 extern const struct kvm_stats_desc kvm_vcpu_stats_desc[];
2119
2120 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
2121 {
2122 if (unlikely(kvm->mmu_invalidate_in_progress))
2123 return 1;
2124 /*
2125 * Ensure the read of mmu_invalidate_in_progress happens before
2126 * the read of mmu_invalidate_seq. This interacts with the
2127 * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
2128 * that the caller either sees the old (non-zero) value of
2129 * mmu_invalidate_in_progress or the new (incremented) value of
2130 * mmu_invalidate_seq.
2131 *
2132 * PowerPC Book3s HV KVM calls this under a per-page lock rather
2133 * than under kvm->mmu_lock, for scalability, so can't rely on
2134 * kvm->mmu_lock to keep things ordered.
2135 */
2136 smp_rmb();
2137 if (kvm->mmu_invalidate_seq != mmu_seq)
2138 return 1;
2139 return 0;
2140 }
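
/*
 * Simplified sketch of the intended usage in an arch page fault handler; only
 * the snapshot/recheck pairing is the point, the surrounding steps are
 * illustrative assumptions:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = ...;				// sleepable pfn lookup
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		goto out_unlock;		// mapping may be stale, retry the fault
 *	// ... install the mapping ...
 */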
2141
2142 static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
2143 unsigned long mmu_seq,
2144 gfn_t gfn)
2145 {
2146 lockdep_assert_held(&kvm->mmu_lock);
2147 /*
2148 * If mmu_invalidate_in_progress is non-zero, then the range maintained
2149 * by kvm_mmu_notifier_invalidate_range_start contains all addresses
2150 * that might be being invalidated. Note that it may include some false
2151  * positives, due to shortcuts when handling concurrent invalidations.
2152 */
2153 if (unlikely(kvm->mmu_invalidate_in_progress)) {
2154 /*
2155 * Dropping mmu_lock after bumping mmu_invalidate_in_progress
2156 * but before updating the range is a KVM bug.
2157 */
2158 if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
2159 kvm->mmu_invalidate_range_end == INVALID_GPA))
2160 return 1;
2161
2162 if (gfn >= kvm->mmu_invalidate_range_start &&
2163 gfn < kvm->mmu_invalidate_range_end)
2164 return 1;
2165 }
2166
2167 if (kvm->mmu_invalidate_seq != mmu_seq)
2168 return 1;
2169 return 0;
2170 }
2171
2172 /*
2173 * This lockless version of the range-based retry check *must* be paired with a
2174 * call to the locked version after acquiring mmu_lock, i.e. this is safe to
2175 * use only as a pre-check to avoid contending mmu_lock. This version *will*
2176 * get false negatives and false positives.
2177 */
2178 static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
2179 unsigned long mmu_seq,
2180 gfn_t gfn)
2181 {
2182 /*
2183 * Use READ_ONCE() to ensure the in-progress flag and sequence counter
2184 * are always read from memory, e.g. so that checking for retry in a
2185 * loop won't result in an infinite retry loop. Don't force loads for
2186 * start+end, as the key to avoiding infinite retry loops is observing
2187 * the 1=>0 transition of in-progress, i.e. getting false negatives
2188 * due to stale start+end values is acceptable.
2189 */
2190 if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
2191 gfn >= kvm->mmu_invalidate_range_start &&
2192 gfn < kvm->mmu_invalidate_range_end)
2193 return true;
2194
2195 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
2196 }
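
/*
 * Example pairing for the lockless pre-check, per the comment above (sketch;
 * RET_RETRY is a hypothetical return code):
 *
 *	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
 *		return RET_RETRY;
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
 *		goto out_unlock;
 */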
2197
2198 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2199
2200 #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
2201
2202 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
2203 int kvm_set_irq_routing(struct kvm *kvm,
2204 const struct kvm_irq_routing_entry *entries,
2205 unsigned nr,
2206 unsigned flags);
2207 int kvm_init_irq_routing(struct kvm *kvm);
2208 int kvm_set_routing_entry(struct kvm *kvm,
2209 struct kvm_kernel_irq_routing_entry *e,
2210 const struct kvm_irq_routing_entry *ue);
2211 void kvm_free_irq_routing(struct kvm *kvm);
2212
2213 #else
2214
2215 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
2216
2217 static inline int kvm_init_irq_routing(struct kvm *kvm)
2218 {
2219 return 0;
2220 }
2221
2222 #endif
2223
2224 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
2225
2226 void kvm_eventfd_init(struct kvm *kvm);
2227 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
2228
2229 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2230 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
2231 void kvm_irqfd_release(struct kvm *kvm);
2232 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2233 unsigned int irqchip,
2234 unsigned int pin);
2235 void kvm_irq_routing_update(struct kvm *);
2236 #else
2237 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
2238 {
2239 return -EINVAL;
2240 }
2241
2242 static inline void kvm_irqfd_release(struct kvm *kvm) {}
2243
2244 static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2245 unsigned int irqchip,
2246 unsigned int pin)
2247 {
2248 return false;
2249 }
2250 #endif /* CONFIG_HAVE_KVM_IRQCHIP */
2251
2252 void kvm_arch_irq_routing_update(struct kvm *kvm);
2253
2254 static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
2255 {
2256 /*
2257 * Ensure the rest of the request is published to kvm_check_request's
2258 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
2259 */
2260 smp_wmb();
2261 set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2262 }
2263
2264 static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
2265 {
2266 /*
2267  * Requests that don't require vCPU action should never be logged in
2268 * vcpu->requests. The vCPU won't clear the request, so it will stay
2269 * logged indefinitely and prevent the vCPU from entering the guest.
2270 */
2271 BUILD_BUG_ON(!__builtin_constant_p(req) ||
2272 (req & KVM_REQUEST_NO_ACTION));
2273
2274 __kvm_make_request(req, vcpu);
2275 }
2276
2277 #ifndef CONFIG_S390
2278 static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
2279 {
2280 kvm_make_request(req, vcpu);
2281 __kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
2282 }
2283 #endif
2284
2285 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
2286 {
2287 return READ_ONCE(vcpu->requests);
2288 }
2289
2290 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
2291 {
2292 return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2293 }
2294
2295 static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
2296 {
2297 clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2298 }
2299
2300 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
2301 {
2302 if (kvm_test_request(req, vcpu)) {
2303 kvm_clear_request(req, vcpu);
2304
2305 /*
2306 * Ensure the rest of the request is visible to kvm_check_request's
2307 * caller. Paired with the smp_wmb in kvm_make_request.
2308 */
2309 smp_mb__after_atomic();
2310 return true;
2311 } else {
2312 return false;
2313 }
2314 }
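
/*
 * Example of the intended producer/consumer pairing; KVM_REQ_EXAMPLE and
 * handle_example_request() are placeholders, real request numbers are
 * arch-defined:
 *
 *	// producer, any context
 *	kvm_make_request_and_kick(KVM_REQ_EXAMPLE, vcpu);
 *
 *	// consumer, in the vCPU run loop before entering the guest
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
 *			handle_example_request(vcpu);
 *	}
 */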
2315
2316 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
2317 extern bool enable_virt_at_load;
2318 #endif
2319
2320 extern unsigned int halt_poll_ns;
2321 extern unsigned int halt_poll_ns_grow;
2322 extern unsigned int halt_poll_ns_grow_start;
2323 extern unsigned int halt_poll_ns_shrink;
2324
2325 struct kvm_device {
2326 const struct kvm_device_ops *ops;
2327 struct kvm *kvm;
2328 void *private;
2329 struct list_head vm_node;
2330 };
2331
2332 /* create, destroy, and name are mandatory */
2333 struct kvm_device_ops {
2334 const char *name;
2335
2336 /*
2337 * create is called holding kvm->lock and any operations not suitable
2338 * to do while holding the lock should be deferred to init (see
2339 * below).
2340 */
2341 int (*create)(struct kvm_device *dev, u32 type);
2342
2343 /*
2344 * init is called after create if create is successful and is called
2345 * outside of holding kvm->lock.
2346 */
2347 void (*init)(struct kvm_device *dev);
2348
2349 /*
2350 * Destroy is responsible for freeing dev.
2351 *
2352 * Destroy may be called before or after destructors are called
2353 * on emulated I/O regions, depending on whether a reference is
2354 * held by a vcpu or other kvm component that gets destroyed
2355 * after the emulated I/O.
2356 */
2357 void (*destroy)(struct kvm_device *dev);
2358
2359 /*
2360 * Release is an alternative method to free the device. It is
2361 * called when the device file descriptor is closed. Once
2362 * release is called, the destroy method will not be called
2363 * anymore as the device is removed from the device list of
2364 * the VM. kvm->lock is held.
2365 */
2366 void (*release)(struct kvm_device *dev);
2367
2368 int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2369 int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2370 int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2371 long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
2372 unsigned long arg);
2373 int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
2374 };
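
/*
 * Minimal sketch of a device implementation using the ops above, assuming a
 * hypothetical KVM_DEV_TYPE_EXAMPLE and state structure; only name, create
 * and destroy are shown since they are the mandatory pieces:
 *
 *	static int example_dev_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = kzalloc(sizeof(struct example_dev_state),
 *				       GFP_KERNEL);
 *		return dev->private ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_dev_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev->private);
 *		kfree(dev);			// destroy must free dev itself
 *	}
 *
 *	static const struct kvm_device_ops example_dev_ops = {
 *		.name = "example",
 *		.create = example_dev_create,
 *		.destroy = example_dev_destroy,
 *	};
 *
 * registered once at init time with
 * kvm_register_device_ops(&example_dev_ops, KVM_DEV_TYPE_EXAMPLE).
 */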
2375
2376 struct kvm_device *kvm_device_from_filp(struct file *filp);
2377 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
2378 void kvm_unregister_device_ops(u32 type);
2379
2380 extern struct kvm_device_ops kvm_mpic_ops;
2381 extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
2382 extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
2383 extern struct kvm_device_ops kvm_arm_vgic_v5_ops;
2384
2385 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2386
2387 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2388 {
2389 vcpu->spin_loop.in_spin_loop = val;
2390 }
2391 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2392 {
2393 vcpu->spin_loop.dy_eligible = val;
2394 }
2395
2396 #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2397
2398 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2399 {
2400 }
2401
2402 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2403 {
2404 }
2405 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2406
2407 static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
2408 {
2409 return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
2410 !(memslot->flags & KVM_MEMSLOT_INVALID));
2411 }
2412
2413 struct kvm_vcpu *kvm_get_running_vcpu(void);
2414 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
2415
2416 #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
2417 struct kvm_kernel_irqfd;
2418
2419 bool kvm_arch_has_irq_bypass(void);
2420 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
2421 struct irq_bypass_producer *);
2422 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
2423 struct irq_bypass_producer *);
2424 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
2425 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
2426 void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
2427 struct kvm_kernel_irq_routing_entry *old,
2428 struct kvm_kernel_irq_routing_entry *new);
2429 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
2430
2431 #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
2432 /* If we wake up during the poll time, was it a successful poll? */
2433 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2434 {
2435 return vcpu->valid_wakeup;
2436 }
2437
2438 #else
2439 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2440 {
2441 return true;
2442 }
2443 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
2444
2445 #ifdef CONFIG_HAVE_KVM_NO_POLL
2446 /* Callback that tells if we must not poll */
2447 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
2448 #else
2449 static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
2450 {
2451 return false;
2452 }
2453 #endif /* CONFIG_HAVE_KVM_NO_POLL */
2454
2455 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2456
2457 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
2458 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
2459 #else
2460 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
2461 {
2462 return 0;
2463 }
2464 #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
2465
2466 #ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
2467 static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
2468 {
2469 vcpu->run->exit_reason = KVM_EXIT_INTR;
2470 vcpu->stat.signal_exits++;
2471 }
2472
2473 static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
2474 {
2475 int r = xfer_to_guest_mode_handle_work();
2476
2477 if (r) {
2478 WARN_ON_ONCE(r != -EINTR);
2479 kvm_handle_signal_exit(vcpu);
2480 }
2481 return r;
2482 }
2483 #endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
2484
2485 /*
2486 * If more than one page is being (un)accounted, @virt must be the address of
2487  * the first page of a block of pages that were allocated together (i.e.
2488 * accounted together).
2489 *
2490 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
2491 * is thread-safe.
2492 */
2493 static inline void kvm_account_pgtable_pages(void *virt, int nr)
2494 {
2495 mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
2496 }
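
/*
 * Usage sketch: pass +1 when a secondary page-table page is allocated and -1
 * when it is freed (or +/-nr for a block, using the address of the first page
 * as noted above); sp->spt is an illustrative x86-style field name:
 *
 *	kvm_account_pgtable_pages(sp->spt, +1);		// on allocation
 *	kvm_account_pgtable_pages(sp->spt, -1);		// on free
 */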
2497
2498 /*
2499 * This defines how many reserved entries we want to keep before we
2500  * kick the vCPU out to userspace to avoid the dirty ring becoming full.
2501  * This value can be tuned higher if e.g. PML is enabled on the host.
2502 */
2503 #define KVM_DIRTY_RING_RSVD_ENTRIES 64
2504
2505 /* Max number of entries allowed for each kvm dirty ring */
2506 #define KVM_DIRTY_RING_MAX_ENTRIES 65536
2507
2508 static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
2509 gpa_t gpa, gpa_t size,
2510 bool is_write, bool is_exec,
2511 bool is_private)
2512 {
2513 vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
2514 vcpu->run->memory_fault.gpa = gpa;
2515 vcpu->run->memory_fault.size = size;
2516
2517 /* RWX flags are not (yet) defined or communicated to userspace. */
2518 vcpu->run->memory_fault.flags = 0;
2519 if (is_private)
2520 vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
2521 }
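
/*
 * Typical usage sketch in a fault path (the -EFAULT convention here is an
 * assumption): fill in the exit information, then fail the fault so the run
 * loop returns to userspace with KVM_EXIT_MEMORY_FAULT:
 *
 *	kvm_prepare_memory_fault_exit(vcpu, gpa, PAGE_SIZE, is_write, is_exec,
 *				      is_private);
 *	return -EFAULT;
 */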
2522
2523 static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot)
2524 {
2525 if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
2526 return false;
2527
2528 return slot->flags & KVM_MEMSLOT_GMEM_ONLY;
2529 }
2530
2531 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2532 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
2533 {
2534 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
2535 }
2536
2537 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2538 unsigned long mask, unsigned long attrs);
2539 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
2540 struct kvm_gfn_range *range);
2541 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
2542 struct kvm_gfn_range *range);
2543
2544 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2545 {
2546 return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
2547 }
2548 #else
2549 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2550 {
2551 return false;
2552 }
2553 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2554
2555 #ifdef CONFIG_KVM_GUEST_MEMFD
2556 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
2557 gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
2558 int *max_order);
2559 #else
2560 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
2561 struct kvm_memory_slot *slot, gfn_t gfn,
2562 kvm_pfn_t *pfn, struct page **page,
2563 int *max_order)
2564 {
2565 KVM_BUG_ON(1, kvm);
2566 return -EIO;
2567 }
2568 #endif /* CONFIG_KVM_GUEST_MEMFD */
2569
2570 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
2571 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
2572 #endif
2573
2574 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
2575 /**
2576 * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
2577 *
2578 * @kvm: KVM instance
2579 * @gfn: starting GFN to be populated
2580 * @src: userspace-provided buffer containing data to copy into GFN range
2581 * (passed to @post_populate, and incremented on each iteration
2582 * if not NULL). Must be page-aligned.
2583  * @npages: number of pages to copy from the userspace buffer
2584 * @post_populate: callback to issue for each gmem page that backs the GPA
2585 * range
2586 * @opaque: opaque data to pass to @post_populate callback
2587 *
2588 * This is primarily intended for cases where a gmem-backed GPA range needs
2589 * to be initialized with userspace-provided data prior to being mapped into
2590 * the guest as a private page. This should be called with the slots->lock
2591 * held so that caller-enforced invariants regarding the expected memory
2592 * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
2593 *
2594 * Returns the number of pages that were populated.
2595 */
2596 typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2597 struct page *page, void *opaque);
2598
2599 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
2600 kvm_gmem_populate_cb post_populate, void *opaque);
2601 #endif
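
/*
 * Hedged sketch of a @post_populate callback and its invocation; the
 * example_* names, args structure and encryption step are illustrative
 * assumptions standing in for arch-specific preparation of private pages:
 *
 *	static int example_post_populate(struct kvm *kvm, gfn_t gfn,
 *					 kvm_pfn_t pfn, struct page *page,
 *					 void *opaque)
 *	{
 *		struct example_args *args = opaque;
 *
 *		return example_encrypt_page(kvm, gfn, pfn, args->flags);
 *	}
 *
 *	ret = kvm_gmem_populate(kvm, gfn_start, u64_to_user_ptr(params.uaddr),
 *				npages, example_post_populate, &args);
 */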
2602
2603 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
2604 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
2605 #endif
2606
2607 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
2608 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
2609 struct kvm_pre_fault_memory *range);
2610 #endif
2611
2612 #endif
2613