1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef __KVM_HOST_H
3 #define __KVM_HOST_H
4
5 #include <linux/entry-virt.h>
6 #include <linux/types.h>
7 #include <linux/hardirq.h>
8 #include <linux/list.h>
9 #include <linux/mutex.h>
10 #include <linux/spinlock.h>
11 #include <linux/signal.h>
12 #include <linux/sched.h>
13 #include <linux/sched/stat.h>
14 #include <linux/bug.h>
15 #include <linux/minmax.h>
16 #include <linux/mm.h>
17 #include <linux/mmu_notifier.h>
18 #include <linux/preempt.h>
19 #include <linux/msi.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/rcupdate.h>
23 #include <linux/ratelimit.h>
24 #include <linux/err.h>
25 #include <linux/irqflags.h>
26 #include <linux/context_tracking.h>
27 #include <linux/irqbypass.h>
28 #include <linux/rcuwait.h>
29 #include <linux/refcount.h>
30 #include <linux/nospec.h>
31 #include <linux/notifier.h>
32 #include <linux/ftrace.h>
33 #include <linux/hashtable.h>
34 #include <linux/instrumentation.h>
35 #include <linux/interval_tree.h>
36 #include <linux/rbtree.h>
37 #include <linux/xarray.h>
38 #include <asm/signal.h>
39
40 #include <linux/kvm.h>
41 #include <linux/kvm_para.h>
42
43 #include <linux/kvm_types.h>
44
45 #include <asm/kvm_host.h>
46 #include <linux/kvm_dirty_ring.h>
47
48 #ifndef KVM_MAX_VCPU_IDS
49 #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
50 #endif
51
52 /*
53 * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally
54 * by KVM; the remaining bits are visible to userspace and are defined in
55 * include/uapi/linux/kvm.h.
56 */
57 #define KVM_MEMSLOT_INVALID (1UL << 16)
58 #define KVM_MEMSLOT_GMEM_ONLY (1UL << 17)
59
60 /*
61 * Bit 63 of the memslot generation number is an "update in-progress flag",
62 * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
63 * This flag effectively creates a unique generation number that is used to
64 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
65 * i.e. may (or may not) have come from the previous memslots generation.
66 *
67 * This is necessary because the actual memslots update is not atomic with
68 * respect to the generation number update. Updating the generation number
69 * first would allow a vCPU to cache a spte from the old memslots using the
70 * new generation number, and updating the generation number after switching
71 * to the new memslots would allow cache hits using the old generation number
72 * to reference the defunct memslots.
73 *
74 * This mechanism is used to prevent getting hits in KVM's caches while a
75 * memslot update is in-progress, and to prevent cache hits *after* updating
76 * the actual generation number against accesses that were inserted into the
77 * cache *before* the memslots were updated.
78 */
79 #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
80
81 /* Two fragments for cross MMIO pages. */
82 #define KVM_MAX_MMIO_FRAGMENTS 2
83
84 #ifndef KVM_MAX_NR_ADDRESS_SPACES
85 #define KVM_MAX_NR_ADDRESS_SPACES 1
86 #endif
87
88 /*
89 * For a normal pfn, the highest 12 bits should be zero, so bits 52 ~ 62
90 * can be used to mark an error pfn, and bit 63 can be used to mark the
91 * noslot pfn.
92 */
93 #define KVM_PFN_ERR_MASK (0x7ffULL << 52)
94 #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
95 #define KVM_PFN_NOSLOT (0x1ULL << 63)
96
97 #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
98 #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
99 #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
100 #define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
101 #define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4)
102
103 /*
104 * error pfns indicate that the gfn is in a slot but KVM failed to
105 * translate it to a pfn on the host.
106 */
107 static inline bool is_error_pfn(kvm_pfn_t pfn)
108 {
109 return !!(pfn & KVM_PFN_ERR_MASK);
110 }
111
112 /*
113 * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
114 * by a pending signal. Note, the signal may or may not be fatal.
115 */
116 static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
117 {
118 return pfn == KVM_PFN_ERR_SIGPENDING;
119 }
120
121 /*
122 * error_noslot pfns indicate that the gfn cannot be
123 * translated to a pfn - either it is not in a slot, or the
124 * translation failed.
125 */
126 static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
127 {
128 return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
129 }
130
131 /* noslot pfn indicates that the gfn is not in slot. */
132 static inline bool is_noslot_pfn(kvm_pfn_t pfn)
133 {
134 return pfn == KVM_PFN_NOSLOT;
135 }
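/*
 * Illustrative sketch (not part of the header): a typical consumer checks the
 * broad is_error_noslot_pfn() first and only then narrows down the cause.
 * The handle_*() helpers below are hypothetical placeholders.
 *
 *	kvm_pfn_t pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
 *
 *	if (is_error_noslot_pfn(pfn)) {
 *		if (is_noslot_pfn(pfn))
 *			return handle_mmio_gfn(vcpu, gfn);	// gfn has no memslot
 *		if (is_sigpending_pfn(pfn))
 *			return -EINTR;				// interrupted by a signal
 *		return handle_error_pfn(vcpu, pfn);		// slot exists, translation failed
 *	}
 */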
136
137 /*
138 * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
139 * provide their own defines and kvm_is_error_hva()
140 */
141 #ifndef KVM_HVA_ERR_BAD
142
143 #define KVM_HVA_ERR_BAD (PAGE_OFFSET)
144 #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
145
146 static inline bool kvm_is_error_hva(unsigned long addr)
147 {
148 return addr >= PAGE_OFFSET;
149 }
150
151 #endif
152
153 static inline bool kvm_is_error_gpa(gpa_t gpa)
154 {
155 return gpa == INVALID_GPA;
156 }
157
158 #define KVM_REQUEST_MASK GENMASK(7,0)
159 #define KVM_REQUEST_NO_WAKEUP BIT(8)
160 #define KVM_REQUEST_WAIT BIT(9)
161 #define KVM_REQUEST_NO_ACTION BIT(10)
162 /*
163 * Architecture-independent vcpu->requests bit members
164 * Bits 4-7 are reserved for more arch-independent bits.
165 */
166 #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
167 #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
168 #define KVM_REQ_UNBLOCK 2
169 #define KVM_REQ_DIRTY_RING_SOFT_FULL 3
170 #define KVM_REQUEST_ARCH_BASE 8
171
172 /*
173 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
174 * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
175 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
176 * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
177 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
178 * guarantee the vCPU received an IPI and has actually exited guest mode.
179 */
180 #define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
181
182 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
183 BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
184 (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
185 })
186 #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
187
188 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
189 unsigned long *vcpu_bitmap);
190 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
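/*
 * Illustrative sketch (not part of the header): an architecture defines its
 * own requests on top of KVM_REQUEST_ARCH_BASE, raises them on all vCPUs,
 * and services them in its run loop. KVM_REQ_EXAMPLE is a hypothetical
 * request, not a real one.
 *
 *	#define KVM_REQ_EXAMPLE		KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT)
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_EXAMPLE);
 *
 *	// in the vCPU run loop:
 *	if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
 *		handle_example_request(vcpu);		// hypothetical handler
 */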
191
192 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
193 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
194 #define KVM_PIT_IRQ_SOURCE_ID 2
195
196 extern struct mutex kvm_lock;
197 extern struct list_head vm_list;
198
199 struct kvm_io_range {
200 gpa_t addr;
201 int len;
202 struct kvm_io_device *dev;
203 };
204
205 #define NR_IOBUS_DEVS 1000
206
207 struct kvm_io_bus {
208 int dev_count;
209 int ioeventfd_count;
210 struct rcu_head rcu;
211 struct kvm_io_range range[];
212 };
213
214 enum kvm_bus {
215 KVM_MMIO_BUS,
216 KVM_PIO_BUS,
217 KVM_VIRTIO_CCW_NOTIFY_BUS,
218 KVM_FAST_MMIO_BUS,
219 KVM_IOCSR_BUS,
220 KVM_NR_BUSES
221 };
222
223 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
224 int len, const void *val);
225 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
226 gpa_t addr, int len, const void *val, long cookie);
227 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
228 int len, void *val);
229 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
230 int len, struct kvm_io_device *dev);
231 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
232 struct kvm_io_device *dev);
233 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
234 gpa_t addr);
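/*
 * Illustrative sketch (not part of the header), assuming the kvm_io_device
 * plumbing from <kvm/iodev.h>: a device registers a GPA range on one of the
 * buses under slots_lock and then receives the vCPU's accesses via its ops.
 * "my_dev_ops" and the address/length values are hypothetical.
 *
 *	kvm_iodevice_init(&dev->iodev, &my_dev_ops);
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, 0xfee00000, 0x1000,
 *				      &dev->iodev);
 *	mutex_unlock(&kvm->slots_lock);
 */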
235
236 #ifdef CONFIG_KVM_ASYNC_PF
237 struct kvm_async_pf {
238 struct work_struct work;
239 struct list_head link;
240 struct list_head queue;
241 struct kvm_vcpu *vcpu;
242 gpa_t cr2_or_gpa;
243 unsigned long addr;
244 struct kvm_arch_async_pf arch;
245 bool wakeup_all;
246 bool notpresent_injected;
247 };
248
249 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
250 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
251 bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
252 unsigned long hva, struct kvm_arch_async_pf *arch);
253 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
254 #endif
255
256 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
257 union kvm_mmu_notifier_arg {
258 unsigned long attributes;
259 };
260
261 enum kvm_gfn_range_filter {
262 KVM_FILTER_SHARED = BIT(0),
263 KVM_FILTER_PRIVATE = BIT(1),
264 };
265
266 struct kvm_gfn_range {
267 struct kvm_memory_slot *slot;
268 gfn_t start;
269 gfn_t end;
270 union kvm_mmu_notifier_arg arg;
271 enum kvm_gfn_range_filter attr_filter;
272 bool may_block;
273 bool lockless;
274 };
275 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
276 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
277 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
278 #endif
279
280 enum {
281 OUTSIDE_GUEST_MODE,
282 IN_GUEST_MODE,
283 EXITING_GUEST_MODE,
284 READING_SHADOW_PAGE_TABLES,
285 };
286
287 struct kvm_host_map {
288 /*
289 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
290 * a 'struct page' for it; when using the mem= kernel parameter some
291 * memory can be used as guest memory even though it is not managed by
292 * the host kernel).
293 */
294 struct page *pinned_page;
295 struct page *page;
296 void *hva;
297 kvm_pfn_t pfn;
298 kvm_pfn_t gfn;
299 bool writable;
300 };
301
302 /*
303 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
304 * directly to check for that.
305 */
306 static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
307 {
308 return !!map->hva;
309 }
310
311 static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
312 {
313 return single_task_running() && !need_resched() && ktime_before(cur, stop);
314 }
315
316 /*
317 * Sometimes a large or cross-page mmio needs to be broken up into separate
318 * exits for userspace servicing.
319 */
320 struct kvm_mmio_fragment {
321 gpa_t gpa;
322 void *data;
323 unsigned len;
324 };
325
326 struct kvm_vcpu {
327 struct kvm *kvm;
328 #ifdef CONFIG_PREEMPT_NOTIFIERS
329 struct preempt_notifier preempt_notifier;
330 #endif
331 int cpu;
332 int vcpu_id; /* id given by userspace at creation */
333 int vcpu_idx; /* index into kvm->vcpu_array */
334 int ____srcu_idx; /* Don't use this directly. You've been warned. */
335 #ifdef CONFIG_PROVE_RCU
336 int srcu_depth;
337 #endif
338 int mode;
339 u64 requests;
340 unsigned long guest_debug;
341
342 struct mutex mutex;
343 struct kvm_run *run;
344
345 #ifndef __KVM_HAVE_ARCH_WQP
346 struct rcuwait wait;
347 #endif
348 struct pid *pid;
349 rwlock_t pid_lock;
350 int sigset_active;
351 sigset_t sigset;
352 unsigned int halt_poll_ns;
353 bool valid_wakeup;
354
355 #ifdef CONFIG_HAS_IOMEM
356 int mmio_needed;
357 int mmio_read_completed;
358 int mmio_is_write;
359 int mmio_cur_fragment;
360 int mmio_nr_fragments;
361 struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
362 #endif
363
364 #ifdef CONFIG_KVM_ASYNC_PF
365 struct {
366 u32 queued;
367 struct list_head queue;
368 struct list_head done;
369 spinlock_t lock;
370 } async_pf;
371 #endif
372
373 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
374 /*
375 * Cpu relax intercept or pause loop exit optimization
376 * in_spin_loop: set when a vcpu does a pause loop exit
377 * or cpu relax intercepted.
378 * dy_eligible: indicates whether vcpu is eligible for directed yield.
379 */
380 struct {
381 bool in_spin_loop;
382 bool dy_eligible;
383 } spin_loop;
384 #endif
385 bool wants_to_run;
386 bool preempted;
387 bool ready;
388 bool scheduled_out;
389 struct kvm_vcpu_arch arch;
390 struct kvm_vcpu_stat stat;
391 char stats_id[KVM_STATS_NAME_SIZE];
392 struct kvm_dirty_ring dirty_ring;
393
394 /*
395 * The most recently used memslot by this vCPU and the slots generation
396 * for which it is valid.
397 * No wraparound protection is needed since generations won't overflow in
398 * thousands of years, even assuming 1M memslot operations per second.
399 */
400 struct kvm_memory_slot *last_used_slot;
401 u64 last_used_slot_gen;
402 };
403
404 /*
405 * Start accounting time towards a guest.
406 * Must be called before entering guest context.
407 */
408 static __always_inline void guest_timing_enter_irqoff(void)
409 {
410 /*
411 * This is running in ioctl context so it's safe to assume that it's the
412 * stime pending cputime to flush.
413 */
414 instrumentation_begin();
415 vtime_account_guest_enter();
416 instrumentation_end();
417 }
418
419 /*
420 * Enter guest context and enter an RCU extended quiescent state.
421 *
422 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
423 * unsafe to use any code which may directly or indirectly use RCU, tracing
424 * (including IRQ flag tracing), or lockdep. All code in this period must be
425 * non-instrumentable.
426 */
427 static __always_inline void guest_context_enter_irqoff(void)
428 {
429 /*
430 * KVM does not hold any references to RCU-protected data when it
431 * switches the CPU into guest mode. In fact, switching to guest mode
432 * is very similar to exiting to userspace from an RCU point of view. In
433 * addition, the CPU may stay in guest mode for quite a long time (up to
434 * one time slice). Let's treat guest mode as a quiescent state, just like
435 * we do with user-mode execution.
436 */
437 if (!context_tracking_guest_enter()) {
438 instrumentation_begin();
439 rcu_virt_note_context_switch();
440 instrumentation_end();
441 }
442 }
443
444 /*
445 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
446 * guest_state_enter_irqoff().
447 */
448 static __always_inline void guest_enter_irqoff(void)
449 {
450 guest_timing_enter_irqoff();
451 guest_context_enter_irqoff();
452 }
453
454 /**
455 * guest_state_enter_irqoff - Fixup state when entering a guest
456 *
457 * Entry to a guest will enable interrupts, but the kernel state is interrupts
458 * disabled when this is invoked. Also tell RCU about it.
459 *
460 * 1) Trace interrupts on state
461 * 2) Invoke context tracking if enabled to adjust RCU state
462 * 3) Tell lockdep that interrupts are enabled
463 *
464 * Invoked from architecture specific code before entering a guest.
465 * Must be called with interrupts disabled and the caller must be
466 * non-instrumentable.
467 * The caller has to invoke guest_timing_enter_irqoff() before this.
468 *
469 * Note: this is analogous to exit_to_user_mode().
470 */
471 static __always_inline void guest_state_enter_irqoff(void)
472 {
473 instrumentation_begin();
474 trace_hardirqs_on_prepare();
475 lockdep_hardirqs_on_prepare();
476 instrumentation_end();
477
478 guest_context_enter_irqoff();
479 lockdep_hardirqs_on(CALLER_ADDR0);
480 }
481
482 /*
483 * Exit guest context and exit an RCU extended quiescent state.
484 *
485 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
486 * unsafe to use any code which may directly or indirectly use RCU, tracing
487 * (including IRQ flag tracing), or lockdep. All code in this period must be
488 * non-instrumentable.
489 */
490 static __always_inline void guest_context_exit_irqoff(void)
491 {
492 /*
493 * Guest mode is treated as a quiescent state, see
494 * guest_context_enter_irqoff() for more details.
495 */
496 if (!context_tracking_guest_exit()) {
497 instrumentation_begin();
498 rcu_virt_note_context_switch();
499 instrumentation_end();
500 }
501 }
502
503 /*
504 * Stop accounting time towards a guest.
505 * Must be called after exiting guest context.
506 */
507 static __always_inline void guest_timing_exit_irqoff(void)
508 {
509 instrumentation_begin();
510 /* Flush the guest cputime we spent on the guest */
511 vtime_account_guest_exit();
512 instrumentation_end();
513 }
514
515 /*
516 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
517 * guest_timing_exit_irqoff().
518 */
519 static __always_inline void guest_exit_irqoff(void)
520 {
521 guest_context_exit_irqoff();
522 guest_timing_exit_irqoff();
523 }
524
525 static inline void guest_exit(void)
526 {
527 unsigned long flags;
528
529 local_irq_save(flags);
530 guest_exit_irqoff();
531 local_irq_restore(flags);
532 }
533
534 /**
535 * guest_state_exit_irqoff - Establish state when returning from guest mode
536 *
537 * Entry from a guest disables interrupts, but guest mode is traced as
538 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
539 *
540 * 1) Tell lockdep that interrupts are disabled
541 * 2) Invoke context tracking if enabled to reactivate RCU
542 * 3) Trace interrupts off state
543 *
544 * Invoked from architecture specific code after exiting a guest.
545 * Must be invoked with interrupts disabled and the caller must be
546 * non-instrumentable.
547 * The caller has to invoke guest_timing_exit_irqoff() after this.
548 *
549 * Note: this is analogous to enter_from_user_mode().
550 */
551 static __always_inline void guest_state_exit_irqoff(void)
552 {
553 lockdep_hardirqs_off(CALLER_ADDR0);
554 guest_context_exit_irqoff();
555
556 instrumentation_begin();
557 trace_hardirqs_off_finish();
558 instrumentation_end();
559 }
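/*
 * Illustrative sketch (not part of the header): the expected ordering of the
 * timing/state helpers around an architecture's actual guest entry, with
 * interrupts disabled throughout. run_guest_asm() is a hypothetical stand-in
 * for the arch-specific entry point.
 *
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	run_guest_asm(vcpu);
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 */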
560
561 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
562 {
563 /*
564 * The memory barrier ensures a previous write to vcpu->requests cannot
565 * be reordered with the read of vcpu->mode. It pairs with the general
566 * memory barrier following the write of vcpu->mode in VCPU RUN.
567 */
568 smp_mb__before_atomic();
569 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
570 }
571
572 /*
573 * Some of the bitops functions do not support too long bitmaps.
574 * This number must be determined not to exceed such limits.
575 */
576 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
577
578 /*
579 * Since at idle each memslot belongs to two memslot sets it has to contain
580 * two embedded nodes for each data structure that it forms a part of.
581 *
582 * Two memslot sets (one active and one inactive) are necessary so the VM
583 * continues to run on one memslot set while the other is being modified.
584 *
585 * These two memslot sets normally point to the same set of memslots.
586 * They can, however, be desynchronized when performing a memslot management
587 * operation by replacing the memslot to be modified by its copy.
588 * After the operation is complete, both memslot sets once again point to
589 * the same, common set of memslot data.
590 *
591 * The memslots themselves are independent of each other so they can be
592 * individually added or deleted.
593 */
594 struct kvm_memory_slot {
595 struct hlist_node id_node[2];
596 struct interval_tree_node hva_node[2];
597 struct rb_node gfn_node[2];
598 gfn_t base_gfn;
599 unsigned long npages;
600 unsigned long *dirty_bitmap;
601 struct kvm_arch_memory_slot arch;
602 unsigned long userspace_addr;
603 u32 flags;
604 short id;
605 u16 as_id;
606
607 #ifdef CONFIG_KVM_GUEST_MEMFD
608 struct {
609 /*
610 * Writes protected by kvm->slots_lock. Acquiring a
611 * reference via kvm_gmem_get_file() is protected by
612 * either kvm->slots_lock or kvm->srcu.
613 */
614 struct file *file;
615 pgoff_t pgoff;
616 } gmem;
617 #endif
618 };
619
620 static inline bool kvm_slot_has_gmem(const struct kvm_memory_slot *slot)
621 {
622 return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
623 }
624
625 static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
626 {
627 return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
628 }
629
630 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
631 {
632 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
633 }
634
635 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
636 {
637 unsigned long len = kvm_dirty_bitmap_bytes(memslot);
638
639 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
640 }
641
642 #ifndef KVM_DIRTY_LOG_MANUAL_CAPS
643 #define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
644 #endif
645
646 struct kvm_s390_adapter_int {
647 u64 ind_addr;
648 u64 ind_gaddr;
649 u64 summary_addr;
650 u64 summary_gaddr;
651 u64 ind_offset;
652 u32 summary_offset;
653 u32 adapter_id;
654 };
655
656 struct kvm_hv_sint {
657 u32 vcpu;
658 u32 sint;
659 };
660
661 struct kvm_xen_evtchn {
662 u32 port;
663 u32 vcpu_id;
664 int vcpu_idx;
665 u32 priority;
666 };
667
668 struct kvm_kernel_irq_routing_entry {
669 u32 gsi;
670 u32 type;
671 int (*set)(struct kvm_kernel_irq_routing_entry *e,
672 struct kvm *kvm, int irq_source_id, int level,
673 bool line_status);
674 union {
675 struct {
676 unsigned irqchip;
677 unsigned pin;
678 } irqchip;
679 struct {
680 u32 address_lo;
681 u32 address_hi;
682 u32 data;
683 u32 flags;
684 u32 devid;
685 } msi;
686 struct kvm_s390_adapter_int adapter;
687 struct kvm_hv_sint hv_sint;
688 struct kvm_xen_evtchn xen_evtchn;
689 };
690 struct hlist_node link;
691 };
692
693 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
694 struct kvm_irq_routing_table {
695 int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
696 u32 nr_rt_entries;
697 /*
698 * Array indexed by gsi. Each entry contains a list of irq chips
699 * the gsi is connected to.
700 */
701 struct hlist_head map[] __counted_by(nr_rt_entries);
702 };
703 #endif
704
705 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
706
707 #ifndef KVM_INTERNAL_MEM_SLOTS
708 #define KVM_INTERNAL_MEM_SLOTS 0
709 #endif
710
711 #define KVM_MEM_SLOTS_NUM SHRT_MAX
712 #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
713
714 #if KVM_MAX_NR_ADDRESS_SPACES == 1
715 static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
716 {
717 return KVM_MAX_NR_ADDRESS_SPACES;
718 }
719
720 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
721 {
722 return 0;
723 }
724 #endif
725
726 #ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
727 static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
728 {
729 return false;
730 }
731 #endif
732
733 #ifdef CONFIG_KVM_GUEST_MEMFD
734 bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
735
736 static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
737 {
738 u64 flags = GUEST_MEMFD_FLAG_MMAP;
739
740 if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
741 flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
742
743 return flags;
744 }
745 #endif
746
747 #ifndef kvm_arch_has_readonly_mem
748 static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
749 {
750 return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
751 }
752 #endif
753
754 struct kvm_memslots {
755 u64 generation;
756 atomic_long_t last_used_slot;
757 struct rb_root_cached hva_tree;
758 struct rb_root gfn_tree;
759 /*
760 * The mapping table from slot id to memslot.
761 *
762 * 7-bit bucket count matches the size of the old id to index array for
763 * 512 slots, while giving good performance with this slot count.
764 * Higher bucket counts bring only small performance improvements but
765 * always result in higher memory usage (even for lower memslot counts).
766 */
767 DECLARE_HASHTABLE(id_hash, 7);
768 int node_idx;
769 };
770
771 struct kvm {
772 #ifdef KVM_HAVE_MMU_RWLOCK
773 rwlock_t mmu_lock;
774 #else
775 spinlock_t mmu_lock;
776 #endif /* KVM_HAVE_MMU_RWLOCK */
777
778 struct mutex slots_lock;
779
780 /*
781 * Protects the arch-specific fields of struct kvm_memory_slots in
782 * use by the VM. To be used under the slots_lock (above) or in a
783 * kvm->srcu critical section where acquiring the slots_lock would
784 * lead to deadlock with the synchronize_srcu in
785 * kvm_swap_active_memslots().
786 */
787 struct mutex slots_arch_lock;
788 struct mm_struct *mm; /* userspace tied to this vm */
789 unsigned long nr_memslot_pages;
790 /* The two memslot sets - active and inactive (per address space) */
791 struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
792 /* The current active memslot set for each address space */
793 struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
794 struct xarray vcpu_array;
795 /*
796 * Protected by slots_lock, but can be read outside if an
797 * incorrect answer is acceptable.
798 */
799 atomic_t nr_memslots_dirty_logging;
800
801 /* Used to wait for completion of MMU notifiers. */
802 spinlock_t mn_invalidate_lock;
803 unsigned long mn_active_invalidate_count;
804 struct rcuwait mn_memslots_update_rcuwait;
805
806 /* For management / invalidation of gfn_to_pfn_caches */
807 spinlock_t gpc_lock;
808 struct list_head gpc_list;
809
810 /*
811 * created_vcpus is protected by kvm->lock, and is incremented
812 * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
813 * incremented after storing the kvm_vcpu pointer in vcpus,
814 * and is accessed atomically.
815 */
816 atomic_t online_vcpus;
817 int max_vcpus;
818 int created_vcpus;
819 int last_boosted_vcpu;
820 struct list_head vm_list;
821 struct mutex lock;
822 struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
823 #ifdef CONFIG_HAVE_KVM_IRQCHIP
824 struct {
825 spinlock_t lock;
826 struct list_head items;
827 /* resampler_list update side is protected by resampler_lock. */
828 struct list_head resampler_list;
829 struct mutex resampler_lock;
830 } irqfds;
831 #endif
832 struct list_head ioeventfds;
833 struct kvm_vm_stat stat;
834 struct kvm_arch arch;
835 refcount_t users_count;
836 #ifdef CONFIG_KVM_MMIO
837 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
838 spinlock_t ring_lock;
839 struct list_head coalesced_zones;
840 #endif
841
842 struct mutex irq_lock;
843 #ifdef CONFIG_HAVE_KVM_IRQCHIP
844 /*
845 * Update side is protected by irq_lock.
846 */
847 struct kvm_irq_routing_table __rcu *irq_routing;
848
849 struct hlist_head irq_ack_notifier_list;
850 #endif
851
852 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
853 struct mmu_notifier mmu_notifier;
854 unsigned long mmu_invalidate_seq;
855 long mmu_invalidate_in_progress;
856 gfn_t mmu_invalidate_range_start;
857 gfn_t mmu_invalidate_range_end;
858 #endif
859 struct list_head devices;
860 u64 manual_dirty_log_protect;
861 struct dentry *debugfs_dentry;
862 struct kvm_stat_data **debugfs_stat_data;
863 struct srcu_struct srcu;
864 struct srcu_struct irq_srcu;
865 pid_t userspace_pid;
866 bool override_halt_poll_ns;
867 unsigned int max_halt_poll_ns;
868 u32 dirty_ring_size;
869 bool dirty_ring_with_bitmap;
870 bool vm_bugged;
871 bool vm_dead;
872
873 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
874 struct notifier_block pm_notifier;
875 #endif
876 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
877 /* Protected by slots_lock (for writes) and RCU (for reads) */
878 struct xarray mem_attr_array;
879 #endif
880 char stats_id[KVM_STATS_NAME_SIZE];
881 };
882
883 #define kvm_err(fmt, ...) \
884 pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
885 #define kvm_info(fmt, ...) \
886 pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
887 #define kvm_debug(fmt, ...) \
888 pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
889 #define kvm_debug_ratelimited(fmt, ...) \
890 pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
891 ## __VA_ARGS__)
892 #define kvm_pr_unimpl(fmt, ...) \
893 pr_err_ratelimited("kvm [%i]: " fmt, \
894 task_tgid_nr(current), ## __VA_ARGS__)
895
896 /* The guest did something we don't support. */
897 #define vcpu_unimpl(vcpu, fmt, ...) \
898 kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
899 (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
900
901 #define vcpu_debug(vcpu, fmt, ...) \
902 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
903 #define vcpu_debug_ratelimited(vcpu, fmt, ...) \
904 kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
905 ## __VA_ARGS__)
906 #define vcpu_err(vcpu, fmt, ...) \
907 kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
908
909 static inline void kvm_vm_dead(struct kvm *kvm)
910 {
911 kvm->vm_dead = true;
912 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
913 }
914
915 static inline void kvm_vm_bugged(struct kvm *kvm)
916 {
917 kvm->vm_bugged = true;
918 kvm_vm_dead(kvm);
919 }
920
921
922 #define KVM_BUG(cond, kvm, fmt...) \
923 ({ \
924 bool __ret = !!(cond); \
925 \
926 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
927 kvm_vm_bugged(kvm); \
928 unlikely(__ret); \
929 })
930
931 #define KVM_BUG_ON(cond, kvm) \
932 ({ \
933 bool __ret = !!(cond); \
934 \
935 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
936 kvm_vm_bugged(kvm); \
937 unlikely(__ret); \
938 })
939
940 /*
941 * Note, "data corruption" refers to corruption of host kernel data structures,
942 * not guest data. Guest data corruption, suspected or confirmed, that is tied
943 * to and contained within a single VM should *never* BUG() and potentially
944 * panic the host, i.e. use this variant of KVM_BUG() if and only if a KVM data
945 * structure is corrupted and that corruption can have a cascading effect on
946 * other parts of the host and/or on other VMs.
947 */
948 #define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
949 ({ \
950 bool __ret = !!(cond); \
951 \
952 if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
953 BUG_ON(__ret); \
954 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
955 kvm_vm_bugged(kvm); \
956 unlikely(__ret); \
957 })
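/*
 * Illustrative sketch (not part of the header): terminate the VM instead of
 * the host when an impossible KVM-internal state is detected. The condition
 * (and the "some_invariant" field) is hypothetical.
 *
 *	if (KVM_BUG_ON(!vcpu->arch.some_invariant, vcpu->kvm))
 *		return -EIO;
 */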
958
959 static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
960 {
961 #ifdef CONFIG_PROVE_RCU
962 WARN_ONCE(vcpu->srcu_depth++,
963 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
964 #endif
965 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
966 }
967
968 static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
969 {
970 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
971
972 #ifdef CONFIG_PROVE_RCU
973 WARN_ONCE(--vcpu->srcu_depth,
974 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
975 #endif
976 }
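/*
 * Illustrative sketch (not part of the header): memslots must be accessed
 * under kvm->srcu; vCPU ioctl paths typically use the per-vCPU wrappers.
 *
 *	kvm_vcpu_srcu_read_lock(vcpu);
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *	... use the memslot ...
 *	kvm_vcpu_srcu_read_unlock(vcpu);
 */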
977
978 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
979 {
980 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
981 }
982
983 /*
984 * Get a bus reference under the update-side lock. No long-term SRCU reader
985 * references are permitted, to avoid stale reads vs concurrent IO
986 * registrations.
987 */
988 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
989 {
990 return rcu_dereference_protected(kvm->buses[idx],
991 lockdep_is_held(&kvm->slots_lock));
992 }
993
994 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
995 {
996 int num_vcpus = atomic_read(&kvm->online_vcpus);
997
998 /*
999 * Explicitly verify the target vCPU is online, as the anti-speculation
1000 * logic only limits the CPU's ability to speculate, e.g. given a "bad"
1001 * index, clamping the index to 0 would return vCPU0, not NULL.
1002 */
1003 if (i >= num_vcpus)
1004 return NULL;
1005
1006 i = array_index_nospec(i, num_vcpus);
1007
1008 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
1009 smp_rmb();
1010 return xa_load(&kvm->vcpu_array, i);
1011 }
1012
1013 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
1014 if (atomic_read(&kvm->online_vcpus)) \
1015 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
1016 (atomic_read(&kvm->online_vcpus) - 1))
1017
1018 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
1019 {
1020 struct kvm_vcpu *vcpu = NULL;
1021 unsigned long i;
1022
1023 if (id < 0)
1024 return NULL;
1025 if (id < KVM_MAX_VCPUS)
1026 vcpu = kvm_get_vcpu(kvm, id);
1027 if (vcpu && vcpu->vcpu_id == id)
1028 return vcpu;
1029 kvm_for_each_vcpu(i, vcpu, kvm)
1030 if (vcpu->vcpu_id == id)
1031 return vcpu;
1032 return NULL;
1033 }
1034
1035 void kvm_destroy_vcpus(struct kvm *kvm);
1036
1037 int kvm_trylock_all_vcpus(struct kvm *kvm);
1038 int kvm_lock_all_vcpus(struct kvm *kvm);
1039 void kvm_unlock_all_vcpus(struct kvm *kvm);
1040
1041 void vcpu_load(struct kvm_vcpu *vcpu);
1042 void vcpu_put(struct kvm_vcpu *vcpu);
1043
1044 #ifdef CONFIG_KVM_IOAPIC
1045 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
1046 #else
1047 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
1048 {
1049 }
1050 #endif
1051
1052 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1053 int kvm_irqfd_init(void);
1054 void kvm_irqfd_exit(void);
1055 #else
1056 static inline int kvm_irqfd_init(void)
1057 {
1058 return 0;
1059 }
1060
1061 static inline void kvm_irqfd_exit(void)
1062 {
1063 }
1064 #endif
1065 int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
1066 void kvm_exit(void);
1067
1068 void kvm_get_kvm(struct kvm *kvm);
1069 bool kvm_get_kvm_safe(struct kvm *kvm);
1070 void kvm_put_kvm(struct kvm *kvm);
1071 bool file_is_kvm(struct file *file);
1072 void kvm_put_kvm_no_destroy(struct kvm *kvm);
1073
1074 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
1075 {
1076 as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
1077 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
1078 lockdep_is_held(&kvm->slots_lock) ||
1079 !refcount_read(&kvm->users_count));
1080 }
1081
1082 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
1083 {
1084 return __kvm_memslots(kvm, 0);
1085 }
1086
1087 static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
1088 {
1089 int as_id = kvm_arch_vcpu_memslots_id(vcpu);
1090
1091 return __kvm_memslots(vcpu->kvm, as_id);
1092 }
1093
1094 static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
1095 {
1096 return RB_EMPTY_ROOT(&slots->gfn_tree);
1097 }
1098
1099 bool kvm_are_all_memslots_empty(struct kvm *kvm);
1100
1101 #define kvm_for_each_memslot(memslot, bkt, slots) \
1102 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
1103 if (WARN_ON_ONCE(!memslot->npages)) { \
1104 } else
1105
1106 static inline
1107 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
1108 {
1109 struct kvm_memory_slot *slot;
1110 int idx = slots->node_idx;
1111
1112 hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
1113 if (slot->id == id)
1114 return slot;
1115 }
1116
1117 return NULL;
1118 }
1119
1120 /* Iterator used for walking memslots that overlap a gfn range. */
1121 struct kvm_memslot_iter {
1122 struct kvm_memslots *slots;
1123 struct rb_node *node;
1124 struct kvm_memory_slot *slot;
1125 };
1126
1127 static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
1128 {
1129 iter->node = rb_next(iter->node);
1130 if (!iter->node)
1131 return;
1132
1133 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
1134 }
1135
1136 static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
1137 struct kvm_memslots *slots,
1138 gfn_t start)
1139 {
1140 int idx = slots->node_idx;
1141 struct rb_node *tmp;
1142 struct kvm_memory_slot *slot;
1143
1144 iter->slots = slots;
1145
1146 /*
1147 * Find the so called "upper bound" of a key - the first node that has
1148 * its key strictly greater than the searched one (the start gfn in our case).
1149 */
1150 iter->node = NULL;
1151 for (tmp = slots->gfn_tree.rb_node; tmp; ) {
1152 slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
1153 if (start < slot->base_gfn) {
1154 iter->node = tmp;
1155 tmp = tmp->rb_left;
1156 } else {
1157 tmp = tmp->rb_right;
1158 }
1159 }
1160
1161 /*
1162 * Find the slot with the lowest gfn that can possibly intersect with
1163 * the range, so we'll ideally have slot start <= range start
1164 */
1165 if (iter->node) {
1166 /*
1167 * A NULL previous node means that the very first slot
1168 * already has a higher start gfn.
1169 * In this case slot start > range start.
1170 */
1171 tmp = rb_prev(iter->node);
1172 if (tmp)
1173 iter->node = tmp;
1174 } else {
1175 /* a NULL node below means no slots */
1176 iter->node = rb_last(&slots->gfn_tree);
1177 }
1178
1179 if (iter->node) {
1180 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
1181
1182 /*
1183 * It is possible in the slot start < range start case that the
1184 * found slot ends before or at range start (slot end <= range start)
1185 * and so it does not overlap the requested range.
1186 *
1187 * In such non-overlapping case the next slot (if it exists) will
1188 * already have slot start > range start, otherwise the logic above
1189 * would have found it instead of the current slot.
1190 */
1191 if (iter->slot->base_gfn + iter->slot->npages <= start)
1192 kvm_memslot_iter_next(iter);
1193 }
1194 }
1195
1196 static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
1197 {
1198 if (!iter->node)
1199 return false;
1200
1201 /*
1202 * If this slot starts beyond or at the end of the range, so does
1203 * every next one.
1204 */
1205 return iter->slot->base_gfn < end;
1206 }
1207
1208 /* Iterate over each memslot at least partially intersecting [start, end) range */
1209 #define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
1210 for (kvm_memslot_iter_start(iter, slots, start); \
1211 kvm_memslot_iter_is_valid(iter, end); \
1212 kvm_memslot_iter_next(iter))
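/*
 * Illustrative sketch (not part of the header): walking every memslot that
 * overlaps a [start, end) gfn range, e.g. from an MMU notifier handler.
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *		... process the overlapping part of the slot ...
 *	}
 */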
1213
1214 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1215 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
1216 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
1217
1218 /*
1219 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
1220 * - create a new memory slot
1221 * - delete an existing memory slot
1222 * - modify an existing memory slot
1223 * -- move it in the guest physical memory space
1224 * -- just change its flags
1225 *
1226 * Since flags can be changed by some of these operations, the following
1227 * differentiation is the best we can do for kvm_set_memory_region():
1228 */
1229 enum kvm_mr_change {
1230 KVM_MR_CREATE,
1231 KVM_MR_DELETE,
1232 KVM_MR_MOVE,
1233 KVM_MR_FLAGS_ONLY,
1234 };
1235
1236 int kvm_set_internal_memslot(struct kvm *kvm,
1237 const struct kvm_userspace_memory_region2 *mem);
1238 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1239 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1240 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1241 const struct kvm_memory_slot *old,
1242 struct kvm_memory_slot *new,
1243 enum kvm_mr_change change);
1244 void kvm_arch_commit_memory_region(struct kvm *kvm,
1245 struct kvm_memory_slot *old,
1246 const struct kvm_memory_slot *new,
1247 enum kvm_mr_change change);
1248 /* flush all memory translations */
1249 void kvm_arch_flush_shadow_all(struct kvm *kvm);
1250 /* flush memory translations pointing to 'slot' */
1251 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1252 struct kvm_memory_slot *slot);
1253
1254 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
1255 struct page **pages, int nr_pages);
1256
1257 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
1258 static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1259 {
1260 return __gfn_to_page(kvm, gfn, true);
1261 }
1262
1263 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1264 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1265 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1266 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1267 bool *writable);
1268
1269 static inline void kvm_release_page_unused(struct page *page)
1270 {
1271 if (!page)
1272 return;
1273
1274 put_page(page);
1275 }
1276
1277 void kvm_release_page_clean(struct page *page);
1278 void kvm_release_page_dirty(struct page *page);
1279
1280 static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
1281 bool unused, bool dirty)
1282 {
1283 lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
1284
1285 if (!page)
1286 return;
1287
1288 /*
1289 * If the page that KVM got from the *primary MMU* is writable, and KVM
1290 * installed or reused a SPTE, mark the page/folio dirty. Note, this
1291 * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
1292 * the GFN is write-protected. Folios can't be safely marked dirty
1293 * outside of mmu_lock as doing so could race with writeback on the
1294 * folio. As a result, KVM can't mark folios dirty in the fast page
1295 * fault handler, and so KVM must (somewhat) speculatively mark the
1296 * folio dirty if KVM could locklessly make the SPTE writable.
1297 */
1298 if (unused)
1299 kvm_release_page_unused(page);
1300 else if (dirty)
1301 kvm_release_page_dirty(page);
1302 else
1303 kvm_release_page_clean(page);
1304 }
1305
1306 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
1307 unsigned int foll, bool *writable,
1308 struct page **refcounted_page);
1309
1310 static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
1311 bool write, bool *writable,
1312 struct page **refcounted_page)
1313 {
1314 return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
1315 write ? FOLL_WRITE : 0, writable, refcounted_page);
1316 }
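/*
 * Illustrative sketch (not part of the header): the usual fault-in/release
 * pairing for a page fault handler that installs a mapping under mmu_lock.
 * install_mapping() is a hypothetical arch helper, and the mmu_lock flavor
 * (rwlock vs. spinlock) depends on KVM_HAVE_MMU_RWLOCK.
 *
 *	pfn = kvm_faultin_pfn(vcpu, gfn, write, &writable, &page);
 *	if (is_error_noslot_pfn(pfn))
 *		return handle_bad_pfn(vcpu, gfn, pfn);	// hypothetical
 *
 *	write_lock(&vcpu->kvm->mmu_lock);
 *	ret = install_mapping(vcpu, gfn, pfn, writable);
 *	kvm_release_faultin_page(vcpu->kvm, page, ret != 0, writable);
 *	write_unlock(&vcpu->kvm->mmu_lock);
 */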
1317
1318 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1319 int len);
1320 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1321 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1322 void *data, unsigned long len);
1323 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1324 void *data, unsigned int offset,
1325 unsigned long len);
1326 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1327 int offset, int len);
1328 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1329 unsigned long len);
1330 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1331 void *data, unsigned long len);
1332 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1333 void *data, unsigned int offset,
1334 unsigned long len);
1335 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1336 gpa_t gpa, unsigned long len);
1337
1338 #define __kvm_get_guest(kvm, gfn, offset, v) \
1339 ({ \
1340 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1341 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1342 int __ret = -EFAULT; \
1343 \
1344 if (!kvm_is_error_hva(__addr)) \
1345 __ret = get_user(v, __uaddr); \
1346 __ret; \
1347 })
1348
1349 #define kvm_get_guest(kvm, gpa, v) \
1350 ({ \
1351 gpa_t __gpa = gpa; \
1352 struct kvm *__kvm = kvm; \
1353 \
1354 __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
1355 offset_in_page(__gpa), v); \
1356 })
1357
1358 #define __kvm_put_guest(kvm, gfn, offset, v) \
1359 ({ \
1360 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1361 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1362 int __ret = -EFAULT; \
1363 \
1364 if (!kvm_is_error_hva(__addr)) \
1365 __ret = put_user(v, __uaddr); \
1366 if (!__ret) \
1367 mark_page_dirty(kvm, gfn); \
1368 __ret; \
1369 })
1370
1371 #define kvm_put_guest(kvm, gpa, v) \
1372 ({ \
1373 gpa_t __gpa = gpa; \
1374 struct kvm *__kvm = kvm; \
1375 \
1376 __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
1377 offset_in_page(__gpa), v); \
1378 })
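/*
 * Illustrative sketch (not part of the header): these helpers read/write a
 * single value at a guest physical address with get_user()/put_user()
 * semantics (the value must not straddle a page). FLAG_BIT is hypothetical.
 *
 *	u64 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	if (kvm_put_guest(kvm, gpa, val | FLAG_BIT))
 *		return -EFAULT;
 */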
1379
1380 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1381 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1382 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1383 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
1384 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1385 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1386 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
1387
1388 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
1389 bool writable);
1390 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
1391
1392 static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
1393 struct kvm_host_map *map)
1394 {
1395 return __kvm_vcpu_map(vcpu, gpa, map, true);
1396 }
1397
1398 static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
1399 struct kvm_host_map *map)
1400 {
1401 return __kvm_vcpu_map(vcpu, gpa, map, false);
1402 }
1403
1404 static inline void kvm_vcpu_map_mark_dirty(struct kvm_vcpu *vcpu,
1405 struct kvm_host_map *map)
1406 {
1407 if (kvm_vcpu_mapped(map))
1408 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
1409 }
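/*
 * Illustrative sketch (not part of the header): a short-lived kernel mapping
 * of the guest page containing @gpa, marked dirty after being written and
 * then torn down. "data" and "len" are hypothetical.
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa, &map))
 *		return -EFAULT;
 *	memcpy(map.hva + offset_in_page(gpa), data, len);
 *	kvm_vcpu_map_mark_dirty(vcpu, &map);
 *	kvm_vcpu_unmap(vcpu, &map);
 */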
1410
1411 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
1412 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
1413 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
1414 int len);
1415 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1416 unsigned long len);
1417 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1418 unsigned long len);
1419 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
1420 int offset, int len);
1421 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
1422 unsigned long len);
1423
1424 /**
1425 * kvm_gpc_init - initialize gfn_to_pfn_cache.
1426 *
1427 * @gpc: struct gfn_to_pfn_cache object.
1428 * @kvm: pointer to kvm instance.
1429 *
1430 * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
1431 * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
1432 * the caller before init).
1433 */
1434 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
1435
1436 /**
1437 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
1438 * physical address.
1439 *
1440 * @gpc: struct gfn_to_pfn_cache object.
1441 * @gpa: guest physical address to map.
1442 * @len: sanity check; the range being accessed must fit in a single page.
1443 *
1444 * @return: 0 for success.
1445 * -EINVAL for a mapping which would cross a page boundary.
1446 * -EFAULT for an untranslatable guest physical address.
1447 *
1448 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
1449 * invalidations to be processed. Callers are required to use kvm_gpc_check()
1450 * to ensure that the cache is valid before accessing the target page.
1451 */
1452 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
1453
1454 /**
1455 * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
1456 *
1457 * @gpc: struct gfn_to_pfn_cache object.
1458 * @hva: userspace virtual address to map.
1459 * @len: sanity check; the range being accessed must fit in a single page.
1460 *
1461 * @return: 0 for success.
1462 * -EINVAL for a mapping which would cross a page boundary.
1463 * -EFAULT for an unusable host virtual address.
1464 *
1465 * The semantics of this function are the same as those of kvm_gpc_activate(). It
1466 * merely bypasses a layer of address translation.
1467 */
1468 int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
1469
1470 /**
1471 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
1472 *
1473 * @gpc: struct gfn_to_pfn_cache object.
1474 * @len: sanity check; the range being accessed must fit in a single page.
1475 *
1476 * @return: %true if the cache is still valid and the address matches.
1477 * %false if the cache is not valid.
1478 *
1479 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
1480 * while calling this function, and then continue to hold the lock until the
1481 * access is complete.
1482 *
1483 * Callers in IN_GUEST_MODE may do so without locking, although they should
1484 * still hold a read lock on kvm->srcu for the memslot checks.
1485 */
1486 bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
1487
1488 /**
1489 * kvm_gpc_refresh - update a previously initialized cache.
1490 *
1491 * @gpc: struct gfn_to_pfn_cache object.
1492 * @len: sanity check; the range being accessed must fit in a single page.
1493 *
1494 * @return: 0 for success.
1495 * -EINVAL for a mapping which would cross a page boundary.
1496 * -EFAULT for an untranslatable guest physical address.
1497 *
1498 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
1499 * return from this function does not mean the page can be immediately
1500 * accessed because it may have raced with an invalidation. Callers must
1501 * still lock and check the cache status, as this function does not return
1502 * with the lock still held to permit access.
1503 */
1504 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
1505
1506 /**
1507 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
1508 *
1509 * @gpc: struct gfn_to_pfn_cache object.
1510 *
1511 * This removes a cache from the VM's list to be processed on MMU notifier
1512 * invocation.
1513 */
1514 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
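/*
 * Illustrative sketch (not part of the header), assuming the gfn_to_pfn_cache
 * fields (lock, khva) from <linux/kvm_types.h>: activate the cache once, then
 * take the read lock and revalidate with kvm_gpc_check() before every access,
 * refreshing on failure; deactivate when the cache is no longer needed.
 *
 *	kvm_gpc_init(gpc, kvm);
 *	if (kvm_gpc_activate(gpc, gpa, sizeof(*data)))
 *		return -EFAULT;
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, sizeof(*data))) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *		if (kvm_gpc_refresh(gpc, sizeof(*data)))
 *			return -EFAULT;
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *	memcpy(data, gpc->khva, sizeof(*data));
 *	read_unlock_irqrestore(&gpc->lock, flags);
 *
 *	kvm_gpc_deactivate(gpc);
 */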
1515
1516 static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
1517 {
1518 return gpc->active && !kvm_is_error_gpa(gpc->gpa);
1519 }
1520
1521 static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
1522 {
1523 return gpc->active && kvm_is_error_gpa(gpc->gpa);
1524 }
1525
1526 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
1527 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
1528
1529 void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1530 bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
1531 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
1532 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
1533 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
1534
1535 #ifndef CONFIG_S390
1536 void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
1537
1538 static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1539 {
1540 __kvm_vcpu_kick(vcpu, false);
1541 }
1542 #endif
1543
1544 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
1545 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
1546
1547 void kvm_flush_remote_tlbs(struct kvm *kvm);
1548 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1549 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
1550 const struct kvm_memory_slot *memslot);
1551
1552 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
1553 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
1554 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
1555 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
1556 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
1557 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
1558 #endif
1559
1560 void kvm_mmu_invalidate_begin(struct kvm *kvm);
1561 void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
1562 void kvm_mmu_invalidate_end(struct kvm *kvm);
1563 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
1564
1565 long kvm_arch_dev_ioctl(struct file *filp,
1566 unsigned int ioctl, unsigned long arg);
1567 long kvm_arch_vcpu_ioctl(struct file *filp,
1568 unsigned int ioctl, unsigned long arg);
1569 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
1570 unsigned int ioctl, unsigned long arg);
1571 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
1572
1573 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1574
1575 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1576 struct kvm_memory_slot *slot,
1577 gfn_t gfn_offset,
1578 unsigned long mask);
1579 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1580
1581 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1582 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1583 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1584 int *is_dirty, struct kvm_memory_slot **memslot);
1585 #endif
1586
1587 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1588 bool line_status);
1589 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1590 struct kvm_enable_cap *cap);
1591 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
1592 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
1593 unsigned long arg);
1594
1595 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1596 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1597
1598 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1599 struct kvm_translation *tr);
1600
1601 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1602 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1603 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1604 struct kvm_sregs *sregs);
1605 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1606 struct kvm_sregs *sregs);
1607 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1608 struct kvm_mp_state *mp_state);
1609 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1610 struct kvm_mp_state *mp_state);
1611 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1612 struct kvm_guest_debug *dbg);
1613 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
1614
1615 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
1616 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
1617 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1618 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
1619 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
1620 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
1621
1622 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
1623 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1624 #endif
1625
1626 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
1627 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
1628 #else
1629 static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
1630 #endif
1631
1632 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
1633 /*
1634 * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
1635 * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
1636 * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
1637 * sequence, and at the end of the generic hardware disabling sequence.
1638 */
1639 void kvm_arch_enable_virtualization(void);
1640 void kvm_arch_disable_virtualization(void);
1641 /*
1642 * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
1643 * do the actual twiddling of hardware bits. The hooks are called on all
1644 online CPUs when KVM enables/disables virtualization, and on a single CPU
1645 * when that CPU is onlined/offlined (including for Resume/Suspend).
1646 */
1647 int kvm_arch_enable_virtualization_cpu(void);
1648 void kvm_arch_disable_virtualization_cpu(void);
1649 #endif
1650 bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
1651 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1652 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1653 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1654 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1655 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
1656 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
1657 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1658 void kvm_arch_create_vm_debugfs(struct kvm *kvm);
1659
1660 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
1661 /*
1662 * All architectures that want to use vzalloc currently also
1663 * need their own kvm_arch_alloc_vm implementation.
1664 */
1665 static inline struct kvm *kvm_arch_alloc_vm(void)
1666 {
1667 return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
1668 }
1669 #endif
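/*
 * Illustrative sketch, not taken from any particular architecture: an arch
 * whose struct kvm is too large to kmalloc reliably would define
 * __KVM_HAVE_ARCH_VM_ALLOC in its asm/kvm_host.h and provide a vmalloc-based
 * override, e.g.
 *
 *	static inline struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return __vmalloc(sizeof(struct kvm),
 *				 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 *	}
 *
 * __kvm_arch_free_vm() below pairs with either allocation scheme, since
 * kvfree() handles both kmalloc'd and vmalloc'd memory.
 */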
1670
1671 static inline void __kvm_arch_free_vm(struct kvm *kvm)
1672 {
1673 kvfree(kvm);
1674 }
1675
1676 #ifndef __KVM_HAVE_ARCH_VM_FREE
1677 static inline void kvm_arch_free_vm(struct kvm *kvm)
1678 {
1679 __kvm_arch_free_vm(kvm);
1680 }
1681 #endif
1682
1683 #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
1684 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
1685 {
1686 return -ENOTSUPP;
1687 }
1688 #else
1689 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
1690 #endif
1691
1692 #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1693 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
1694 gfn_t gfn, u64 nr_pages)
1695 {
1696 return -EOPNOTSUPP;
1697 }
1698 #else
1699 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1700 #endif
1701
1702 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
1703 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1704 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1705 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1706 #else
1707 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
1708 {
1709 }
1710
1711 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
1712 {
1713 }
1714
1715 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
1716 {
1717 return false;
1718 }
1719 #endif
1720
1721 static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
1722 {
1723 #ifdef __KVM_HAVE_ARCH_WQP
1724 return vcpu->arch.waitp;
1725 #else
1726 return &vcpu->wait;
1727 #endif
1728 }
1729
1730 /*
1731 * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
1732 * true if the vCPU was blocking and was awakened, false otherwise.
1733 */
1734 static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
1735 {
1736 return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
1737 }
1738
1739 static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
1740 {
1741 return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
1742 }
1743
1744 #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
1745 /*
1746 * Returns true if the virtual interrupt controller is initialized and
1747 * ready to accept virtual IRQs. On some architectures the virtual interrupt
1748 * controller is dynamically instantiated and this is not always true.
1749 */
1750 bool kvm_arch_intc_initialized(struct kvm *kvm);
1751 #else
1752 static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
1753 {
1754 return true;
1755 }
1756 #endif
1757
1758 #ifdef CONFIG_GUEST_PERF_EVENTS
1759 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
1760
1761 void __kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void),
1762 void (*mediated_pmi_handler)(void));
1763
1764 static inline void kvm_register_perf_callbacks(void)
1765 {
1766 __kvm_register_perf_callbacks(NULL, NULL);
1767 }
1768
1769 void kvm_unregister_perf_callbacks(void);
1770 #else
1771 static inline void kvm_register_perf_callbacks(void) {}
1772 static inline void kvm_unregister_perf_callbacks(void) {}
1773 #endif /* CONFIG_GUEST_PERF_EVENTS */
1774
1775 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1776 void kvm_arch_destroy_vm(struct kvm *kvm);
1777
1778 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
1779
1780 struct kvm_irq_ack_notifier {
1781 struct hlist_node link;
1782 unsigned gsi;
1783 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
1784 };
1785
1786 int kvm_irq_map_gsi(struct kvm *kvm,
1787 struct kvm_kernel_irq_routing_entry *entries, int gsi);
1788 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1789
1790 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1791 bool line_status);
1792 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1793 int irq_source_id, int level, bool line_status);
1794 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
1795 struct kvm *kvm, int irq_source_id,
1796 int level, bool line_status);
1797 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1798 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1799 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1800 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1801 struct kvm_irq_ack_notifier *kian);
1802 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1803 struct kvm_irq_ack_notifier *kian);
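/*
 * Illustrative sketch (all names other than the KVM APIs are hypothetical):
 * an in-kernel device model that needs to know when the guest ACKs its GSI
 * would embed and register a notifier, e.g.
 *
 *	dev->ack_notifier.gsi = gsi;
 *	dev->ack_notifier.irq_acked = example_irq_acked;
 *	kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
 *
 * and unregister it with kvm_unregister_irq_ack_notifier() on teardown.
 */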
1804 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1805
1806 /*
1807 * Returns a pointer to the memslot if it contains gfn.
1808 * Otherwise returns NULL.
1809 */
1810 static inline struct kvm_memory_slot *
1811 try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1812 {
1813 if (!slot)
1814 return NULL;
1815
1816 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
1817 return slot;
1818 else
1819 return NULL;
1820 }
1821
1822 /*
1823 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1824 *
1825 * With "approx" set returns the memslot also when the address falls
1826 * in a hole. In that case one of the memslots bordering the hole is
1827 * returned.
1828 */
1829 static inline struct kvm_memory_slot *
1830 search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1831 {
1832 struct kvm_memory_slot *slot;
1833 struct rb_node *node;
1834 int idx = slots->node_idx;
1835
1836 slot = NULL;
1837 for (node = slots->gfn_tree.rb_node; node; ) {
1838 slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
1839 if (gfn >= slot->base_gfn) {
1840 if (gfn < slot->base_gfn + slot->npages)
1841 return slot;
1842 node = node->rb_right;
1843 } else
1844 node = node->rb_left;
1845 }
1846
1847 return approx ? slot : NULL;
1848 }
1849
1850 static inline struct kvm_memory_slot *
1851 ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1852 {
1853 struct kvm_memory_slot *slot;
1854
1855 slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
1856 slot = try_get_memslot(slot, gfn);
1857 if (slot)
1858 return slot;
1859
1860 slot = search_memslots(slots, gfn, approx);
1861 if (slot) {
1862 atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
1863 return slot;
1864 }
1865
1866 return NULL;
1867 }
1868
1869 /*
1870 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
1871 * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
1872 * because that would bloat other code too much.
1873 */
1874 static inline struct kvm_memory_slot *
1875 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1876 {
1877 return ____gfn_to_memslot(slots, gfn, false);
1878 }
1879
1880 static inline unsigned long
1881 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
1882 {
1883 /*
1884 * The index was checked originally in search_memslots. To prevent
1885 * a malicious guest from building a Spectre gadget out of e.g. page
1886 * table walks, do not let the processor speculate loads outside
1887 * the guest's registered memslots.
1888 */
1889 unsigned long offset = gfn - slot->base_gfn;
1890 offset = array_index_nospec(offset, slot->npages);
1891 return slot->userspace_addr + offset * PAGE_SIZE;
1892 }
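/*
 * Illustrative sketch of how arch code might combine the inline lookups
 * above on a hot path (the function name is hypothetical); the caller is
 * assumed to hold the SRCU read lock that protects the memslots:
 *
 *	static unsigned long example_gfn_to_hva(struct kvm_memslots *slots,
 *						gfn_t gfn)
 *	{
 *		struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *
 *		if (!slot)
 *			return KVM_HVA_ERR_BAD;
 *
 *		return __gfn_to_hva_memslot(slot, gfn);
 *	}
 */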
1893
1894 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1895 {
1896 return gfn_to_memslot(kvm, gfn)->id;
1897 }
1898
1899 static inline gfn_t
1900 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
1901 {
1902 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1903
1904 return slot->base_gfn + gfn_offset;
1905 }
1906
1907 static inline gpa_t gfn_to_gpa(gfn_t gfn)
1908 {
1909 return (gpa_t)gfn << PAGE_SHIFT;
1910 }
1911
1912 static inline gfn_t gpa_to_gfn(gpa_t gpa)
1913 {
1914 return (gfn_t)(gpa >> PAGE_SHIFT);
1915 }
1916
1917 static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
1918 {
1919 return (hpa_t)pfn << PAGE_SHIFT;
1920 }
1921
1922 static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
1923 {
1924 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1925
1926 return !kvm_is_error_hva(hva);
1927 }
1928
1929 static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
1930 {
1931 lockdep_assert_held(&gpc->lock);
1932
1933 if (!gpc->memslot)
1934 return;
1935
1936 mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
1937 }
1938
1939 enum kvm_stat_kind {
1940 KVM_STAT_VM,
1941 KVM_STAT_VCPU,
1942 };
1943
1944 struct kvm_stat_data {
1945 struct kvm *kvm;
1946 const struct _kvm_stats_desc *desc;
1947 enum kvm_stat_kind kind;
1948 };
1949
1950 struct _kvm_stats_desc {
1951 struct kvm_stats_desc desc;
1952 char name[KVM_STATS_NAME_SIZE];
1953 };
1954
1955 #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
1956 .flags = type | unit | base | \
1957 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
1958 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
1959 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
1960 .exponent = exp, \
1961 .size = sz, \
1962 .bucket_size = bsz
1963
1964 #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1965 { \
1966 { \
1967 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1968 .offset = offsetof(struct kvm_vm_stat, generic.stat) \
1969 }, \
1970 .name = #stat, \
1971 }
1972 #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1973 { \
1974 { \
1975 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1976 .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
1977 }, \
1978 .name = #stat, \
1979 }
1980 #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1981 { \
1982 { \
1983 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1984 .offset = offsetof(struct kvm_vm_stat, stat) \
1985 }, \
1986 .name = #stat, \
1987 }
1988 #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1989 { \
1990 { \
1991 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1992 .offset = offsetof(struct kvm_vcpu_stat, stat) \
1993 }, \
1994 .name = #stat, \
1995 }
1996 /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
1997 #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
1998 SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
1999
2000 #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
2001 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
2002 unit, base, exponent, 1, 0)
2003 #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
2004 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
2005 unit, base, exponent, 1, 0)
2006 #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
2007 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
2008 unit, base, exponent, 1, 0)
2009 #define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
2010 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
2011 unit, base, exponent, sz, bsz)
2012 #define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
2013 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
2014 unit, base, exponent, sz, 0)
2015
2016 /* Cumulative counter, read/write */
2017 #define STATS_DESC_COUNTER(SCOPE, name) \
2018 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
2019 KVM_STATS_BASE_POW10, 0)
2020 /* Instantaneous counter, read only */
2021 #define STATS_DESC_ICOUNTER(SCOPE, name) \
2022 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
2023 KVM_STATS_BASE_POW10, 0)
2024 /* Peak counter, read/write */
2025 #define STATS_DESC_PCOUNTER(SCOPE, name) \
2026 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
2027 KVM_STATS_BASE_POW10, 0)
2028
2029 /* Instantaneous boolean value, read only */
2030 #define STATS_DESC_IBOOLEAN(SCOPE, name) \
2031 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
2032 KVM_STATS_BASE_POW10, 0)
2033 /* Peak (sticky) boolean value, read/write */
2034 #define STATS_DESC_PBOOLEAN(SCOPE, name) \
2035 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
2036 KVM_STATS_BASE_POW10, 0)
2037
2038 /* Cumulative time in nanosecond */
2039 #define STATS_DESC_TIME_NSEC(SCOPE, name) \
2040 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2041 KVM_STATS_BASE_POW10, -9)
2042 /* Linear histogram for time in nanosecond */
2043 #define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
2044 STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2045 KVM_STATS_BASE_POW10, -9, sz, bsz)
2046 /* Logarithmic histogram for time in nanosecond */
2047 #define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
2048 STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
2049 KVM_STATS_BASE_POW10, -9, sz)
2050
2051 #define KVM_GENERIC_VM_STATS() \
2052 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
2053 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
2054
2055 #define KVM_GENERIC_VCPU_STATS() \
2056 STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
2057 STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
2058 STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
2059 STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
2060 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
2061 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
2062 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
2063 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
2064 HALT_POLL_HIST_COUNT), \
2065 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
2066 HALT_POLL_HIST_COUNT), \
2067 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
2068 HALT_POLL_HIST_COUNT), \
2069 STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
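/*
 * Illustrative sketch: an architecture typically builds its descriptor
 * tables by combining the generic stats above with its own entries (the
 * arch-specific stat names below are hypothetical fields of the arch's
 * struct kvm_vm_stat):
 *
 *	const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 *		KVM_GENERIC_VM_STATS(),
 *		STATS_DESC_COUNTER(VM, shadow_pages_zapped),
 *		STATS_DESC_ICOUNTER(VM, nr_large_mappings)
 *	};
 */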
2070
2071 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
2072 const struct _kvm_stats_desc *desc,
2073 void *stats, size_t size_stats,
2074 char __user *user_buffer, size_t size, loff_t *offset);
2075
2076 /**
2077 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
2078 * statistics data.
2079 *
2080 * @data: start address of the stats data
2081 * @size: the number of buckets in the stats data
2082 * @value: the new value used to update the linear histogram's bucket
2083 * @bucket_size: the size (width) of a bucket
2084 */
2085 static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
2086 u64 value, size_t bucket_size)
2087 {
2088 size_t index = div64_u64(value, bucket_size);
2089
2090 index = min(index, size - 1);
2091 ++data[index];
2092 }
2093
2094 /**
2095 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
2096 * statistics data.
2097 *
2098 * @data: start address of the stats data
2099 * @size: the number of buckets in the stats data
2100 * @value: the new value used to update the logarithmic histogram's bucket
2101 */
2102 static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
2103 {
2104 size_t index = fls64(value);
2105
2106 index = min(index, size - 1);
2107 ++data[index];
2108 }
2109
2110 #define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
2111 kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
2112 #define KVM_STATS_LOG_HIST_UPDATE(array, value) \
2113 kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
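/*
 * Illustrative sketch, mirroring how the generic halt-polling code records
 * a wait duration (assuming @ns holds the elapsed time in nanoseconds):
 *
 *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, ns);
 */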
2114
2115
2116 extern const struct kvm_stats_header kvm_vm_stats_header;
2117 extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
2118 extern const struct kvm_stats_header kvm_vcpu_stats_header;
2119 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
2120
2121 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
2122 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
2123 {
2124 if (unlikely(kvm->mmu_invalidate_in_progress))
2125 return 1;
2126 /*
2127 * Ensure the read of mmu_invalidate_in_progress happens before
2128 * the read of mmu_invalidate_seq. This interacts with the
2129 * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
2130 * that the caller either sees the old (non-zero) value of
2131 * mmu_invalidate_in_progress or the new (incremented) value of
2132 * mmu_invalidate_seq.
2133 *
2134 * PowerPC Book3s HV KVM calls this under a per-page lock rather
2135 * than under kvm->mmu_lock, for scalability, so can't rely on
2136 * kvm->mmu_lock to keep things ordered.
2137 */
2138 smp_rmb();
2139 if (kvm->mmu_invalidate_seq != mmu_seq)
2140 return 1;
2141 return 0;
2142 }
2143
2144 static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
2145 unsigned long mmu_seq,
2146 gfn_t gfn)
2147 {
2148 lockdep_assert_held(&kvm->mmu_lock);
2149 /*
2150 * If mmu_invalidate_in_progress is non-zero, then the range maintained
2151 * by kvm_mmu_notifier_invalidate_range_start contains all addresses
2152 * that might be being invalidated. Note that it may include some false
2153 * positives, due to shortcuts when handling concurrent invalidations.
2154 */
2155 if (unlikely(kvm->mmu_invalidate_in_progress)) {
2156 /*
2157 * Dropping mmu_lock after bumping mmu_invalidate_in_progress
2158 * but before updating the range is a KVM bug.
2159 */
2160 if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
2161 kvm->mmu_invalidate_range_end == INVALID_GPA))
2162 return 1;
2163
2164 if (gfn >= kvm->mmu_invalidate_range_start &&
2165 gfn < kvm->mmu_invalidate_range_end)
2166 return 1;
2167 }
2168
2169 if (kvm->mmu_invalidate_seq != mmu_seq)
2170 return 1;
2171 return 0;
2172 }
2173
2174 /*
2175 * This lockless version of the range-based retry check *must* be paired with a
2176 * call to the locked version after acquiring mmu_lock, i.e. this is safe to
2177 * use only as a pre-check to avoid contending mmu_lock. This version *will*
2178 * get false negatives and false positives.
2179 */
2180 static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
2181 unsigned long mmu_seq,
2182 gfn_t gfn)
2183 {
2184 /*
2185 * Use READ_ONCE() to ensure the in-progress flag and sequence counter
2186 * are always read from memory, e.g. so that checking for retry in a
2187 * loop won't result in an infinite retry loop. Don't force loads for
2188 * start+end, as the key to avoiding infinite retry loops is observing
2189 * the 1=>0 transition of in-progress, i.e. getting false negatives
2190 * due to stale start+end values is acceptable.
2191 */
2192 if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
2193 gfn >= kvm->mmu_invalidate_range_start &&
2194 gfn < kvm->mmu_invalidate_range_end)
2195 return true;
2196
2197 return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
2198 }
2199 #endif
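/*
 * Illustrative sketch of the canonical page-fault pattern these helpers are
 * designed for (see Documentation/virt/kvm/locking.rst); everything other
 * than the helpers themselves is hypothetical arch code:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *
 *	pfn = <resolve the pfn without holding mmu_lock>;
 *
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
 *		goto out_unlock;	// pfn may be stale, retry the fault
 *	<install the mapping>;
 * out_unlock:
 *	write_unlock(&kvm->mmu_lock);
 */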
2200
2201 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2202
2203 #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
2204
2205 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
2206 int kvm_set_irq_routing(struct kvm *kvm,
2207 const struct kvm_irq_routing_entry *entries,
2208 unsigned nr,
2209 unsigned flags);
2210 int kvm_init_irq_routing(struct kvm *kvm);
2211 int kvm_set_routing_entry(struct kvm *kvm,
2212 struct kvm_kernel_irq_routing_entry *e,
2213 const struct kvm_irq_routing_entry *ue);
2214 void kvm_free_irq_routing(struct kvm *kvm);
2215
2216 #else
2217
2218 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
2219
2220 static inline int kvm_init_irq_routing(struct kvm *kvm)
2221 {
2222 return 0;
2223 }
2224
2225 #endif
2226
2227 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
2228
2229 void kvm_eventfd_init(struct kvm *kvm);
2230 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
2231
2232 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2233 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
2234 void kvm_irqfd_release(struct kvm *kvm);
2235 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2236 unsigned int irqchip,
2237 unsigned int pin);
2238 void kvm_irq_routing_update(struct kvm *);
2239 #else
2240 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
2241 {
2242 return -EINVAL;
2243 }
2244
2245 static inline void kvm_irqfd_release(struct kvm *kvm) {}
2246
2247 static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2248 unsigned int irqchip,
2249 unsigned int pin)
2250 {
2251 return false;
2252 }
2253 #endif /* CONFIG_HAVE_KVM_IRQCHIP */
2254
2255 void kvm_arch_irq_routing_update(struct kvm *kvm);
2256
2257 static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
2258 {
2259 /*
2260 * Ensure the rest of the request is published to kvm_check_request's
2261 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
2262 */
2263 smp_wmb();
2264 set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2265 }
2266
2267 static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
2268 {
2269 /*
2270 * Requests that don't require vCPU action should never be logged in
2271 * vcpu->requests. The vCPU won't clear the request, so it will stay
2272 * logged indefinitely and prevent the vCPU from entering the guest.
2273 */
2274 BUILD_BUG_ON(!__builtin_constant_p(req) ||
2275 (req & KVM_REQUEST_NO_ACTION));
2276
2277 __kvm_make_request(req, vcpu);
2278 }
2279
2280 #ifndef CONFIG_S390
2281 static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
2282 {
2283 kvm_make_request(req, vcpu);
2284 __kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
2285 }
2286 #endif
2287
2288 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
2289 {
2290 return READ_ONCE(vcpu->requests);
2291 }
2292
2293 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
2294 {
2295 return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2296 }
2297
2298 static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
2299 {
2300 clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2301 }
2302
2303 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
2304 {
2305 if (kvm_test_request(req, vcpu)) {
2306 kvm_clear_request(req, vcpu);
2307
2308 /*
2309 * Ensure the rest of the request is visible to kvm_check_request's
2310 * caller. Paired with the smp_wmb in kvm_make_request.
2311 */
2312 smp_mb__after_atomic();
2313 return true;
2314 } else {
2315 return false;
2316 }
2317 }
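/*
 * Illustrative sketch of the producer/consumer request pattern
 * (KVM_REQ_EXAMPLE and handle_example_request() are hypothetical):
 *
 *	// producer, e.g. another vCPU or a VM-wide ioctl
 *	kvm_make_request(KVM_REQ_EXAMPLE, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, early in the arch's vcpu_enter_guest() equivalent
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
 *			handle_example_request(vcpu);
 *	}
 */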
2318
2319 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
2320 extern bool enable_virt_at_load;
2321 extern bool kvm_rebooting;
2322 #endif
2323
2324 extern unsigned int halt_poll_ns;
2325 extern unsigned int halt_poll_ns_grow;
2326 extern unsigned int halt_poll_ns_grow_start;
2327 extern unsigned int halt_poll_ns_shrink;
2328
2329 struct kvm_device {
2330 const struct kvm_device_ops *ops;
2331 struct kvm *kvm;
2332 void *private;
2333 struct list_head vm_node;
2334 };
2335
2336 /* create, destroy, and name are mandatory */
2337 struct kvm_device_ops {
2338 const char *name;
2339
2340 /*
2341 * create is called holding kvm->lock and any operations not suitable
2342 * to do while holding the lock should be deferred to init (see
2343 * below).
2344 */
2345 int (*create)(struct kvm_device *dev, u32 type);
2346
2347 /*
2348 * init is called after create if create is successful and is called
2349 * outside of holding kvm->lock.
2350 */
2351 void (*init)(struct kvm_device *dev);
2352
2353 /*
2354 * Destroy is responsible for freeing dev.
2355 *
2356 * Destroy may be called before or after destructors are called
2357 * on emulated I/O regions, depending on whether a reference is
2358 * held by a vcpu or other kvm component that gets destroyed
2359 * after the emulated I/O.
2360 */
2361 void (*destroy)(struct kvm_device *dev);
2362
2363 /*
2364 * Release is an alternative method to free the device. It is
2365 * called when the device file descriptor is closed. Once
2366 * release is called, the destroy method will not be called
2367 * anymore as the device is removed from the device list of
2368 * the VM. kvm->lock is held.
2369 */
2370 void (*release)(struct kvm_device *dev);
2371
2372 int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2373 int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2374 int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2375 long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
2376 unsigned long arg);
2377 int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
2378 };
2379
2380 struct kvm_device *kvm_device_from_filp(struct file *filp);
2381 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
2382 void kvm_unregister_device_ops(u32 type);
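/*
 * Illustrative sketch of a KVM_CREATE_DEVICE backend (all "example" names
 * and the device type constant are hypothetical):
 *
 *	static struct kvm_device_ops kvm_example_dev_ops = {
 *		.name = "kvm-example",
 *		.create = kvm_example_create,
 *		.destroy = kvm_example_destroy,
 *		.set_attr = kvm_example_set_attr,
 *		.has_attr = kvm_example_has_attr,
 *	};
 *
 *	r = kvm_register_device_ops(&kvm_example_dev_ops, KVM_DEV_TYPE_EXAMPLE);
 */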
2383
2384 extern struct kvm_device_ops kvm_mpic_ops;
2385 extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
2386 extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
2387
2388 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2389
2390 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2391 {
2392 vcpu->spin_loop.in_spin_loop = val;
2393 }
2394 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2395 {
2396 vcpu->spin_loop.dy_eligible = val;
2397 }
2398
2399 #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2400
2401 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2402 {
2403 }
2404
2405 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2406 {
2407 }
2408 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2409
2410 static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
2411 {
2412 return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
2413 !(memslot->flags & KVM_MEMSLOT_INVALID));
2414 }
2415
2416 struct kvm_vcpu *kvm_get_running_vcpu(void);
2417 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
2418
2419 #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
2420 struct kvm_kernel_irqfd;
2421
2422 bool kvm_arch_has_irq_bypass(void);
2423 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
2424 struct irq_bypass_producer *);
2425 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
2426 struct irq_bypass_producer *);
2427 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
2428 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
2429 void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
2430 struct kvm_kernel_irq_routing_entry *old,
2431 struct kvm_kernel_irq_routing_entry *new);
2432 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
2433
2434 #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
2435 /* If we wake up during the poll time, was it a successful poll? */
2436 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2437 {
2438 return vcpu->valid_wakeup;
2439 }
2440
2441 #else
2442 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2443 {
2444 return true;
2445 }
2446 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
2447
2448 #ifdef CONFIG_HAVE_KVM_NO_POLL
2449 /* Callback that tells if we must not poll */
2450 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
2451 #else
2452 static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
2453 {
2454 return false;
2455 }
2456 #endif /* CONFIG_HAVE_KVM_NO_POLL */
2457
2458 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2459
2460 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
2461 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
2462 #else
2463 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
2464 {
2465 return 0;
2466 }
2467 #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
2468
2469 #ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
2470 static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
2471 {
2472 vcpu->run->exit_reason = KVM_EXIT_INTR;
2473 vcpu->stat.signal_exits++;
2474 }
2475
2476 static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
2477 {
2478 int r = xfer_to_guest_mode_handle_work();
2479
2480 if (r) {
2481 WARN_ON_ONCE(r != -EINTR);
2482 kvm_handle_signal_exit(vcpu);
2483 }
2484 return r;
2485 }
2486 #endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
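/*
 * Illustrative sketch: an arch's vCPU run loop would typically call the
 * helper above with interrupts enabled, before committing to guest entry,
 * and bail out to userspace on a non-zero return:
 *
 *	r = kvm_xfer_to_guest_mode_handle_work(vcpu);
 *	if (r)
 *		return r;	// KVM_EXIT_INTR has already been set up
 */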
2487
2488 /*
2489 * If more than one page is being (un)accounted, @virt must be the address of
2490 * the first page of a block of pages that were allocated together (i.e.
2491 * accounted together).
2492 *
2493 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
2494 * is thread-safe.
2495 */
2496 static inline void kvm_account_pgtable_pages(void *virt, int nr)
2497 {
2498 mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
2499 }
2500
2501 /*
2502 * This defines how many reserved entries we want to keep before we
2503 * kick the vcpu out to userspace to avoid the dirty ring becoming full.
2504 * This value can be tuned higher if e.g. PML is enabled on the host.
2505 */
2506 #define KVM_DIRTY_RING_RSVD_ENTRIES 64
2507
2508 /* Max number of entries allowed for each kvm dirty ring */
2509 #define KVM_DIRTY_RING_MAX_ENTRIES 65536
2510
2511 static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
2512 gpa_t gpa, gpa_t size,
2513 bool is_write, bool is_exec,
2514 bool is_private)
2515 {
2516 vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
2517 vcpu->run->memory_fault.gpa = gpa;
2518 vcpu->run->memory_fault.size = size;
2519
2520 /* RWX flags are not (yet) defined or communicated to userspace. */
2521 vcpu->run->memory_fault.flags = 0;
2522 if (is_private)
2523 vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
2524 }
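/*
 * Illustrative sketch: a fault path that cannot resolve, say, a private
 * access would fill in the exit and return -EFAULT so that the vCPU exits
 * to userspace with KVM_EXIT_MEMORY_FAULT (write_fault/exec_fault are
 * hypothetical locals of the arch fault handler):
 *
 *	kvm_prepare_memory_fault_exit(vcpu, gfn_to_gpa(gfn), PAGE_SIZE,
 *				      write_fault, exec_fault, true);
 *	return -EFAULT;
 */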
2525
2526 static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot)
2527 {
2528 if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
2529 return false;
2530
2531 return slot->flags & KVM_MEMSLOT_GMEM_ONLY;
2532 }
2533
2534 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2535 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
2536 {
2537 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
2538 }
2539
2540 bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2541 unsigned long mask, unsigned long attrs);
2542 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
2543 struct kvm_gfn_range *range);
2544 bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
2545 struct kvm_gfn_range *range);
2546
2547 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2548 {
2549 return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
2550 }
2551 #else
2552 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2553 {
2554 return false;
2555 }
2556 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
2557
2558 #ifdef CONFIG_KVM_GUEST_MEMFD
2559 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
2560 gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
2561 int *max_order);
2562 #else
2563 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
2564 struct kvm_memory_slot *slot, gfn_t gfn,
2565 kvm_pfn_t *pfn, struct page **page,
2566 int *max_order)
2567 {
2568 KVM_BUG_ON(1, kvm);
2569 return -EIO;
2570 }
2571 #endif /* CONFIG_KVM_GUEST_MEMFD */
2572
2573 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
2574 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
2575 #endif
2576
2577 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
2578 /**
2579 * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
2580 *
2581 * @kvm: KVM instance
2582 * @gfn: starting GFN to be populated
2583 * @src: userspace-provided buffer containing data to copy into GFN range
2584 * (passed to @post_populate, and incremented on each iteration
2585 * if not NULL). Must be page-aligned.
2586 * @npages: number of pages to copy from userspace-buffer
2587 * @post_populate: callback to issue for each gmem page that backs the GPA
2588 * range
2589 * @opaque: opaque data to pass to @post_populate callback
2590 *
2591 * This is primarily intended for cases where a gmem-backed GPA range needs
2592 * to be initialized with userspace-provided data prior to being mapped into
2593 * the guest as a private page. This should be called with the slots->lock
2594 * held so that caller-enforced invariants regarding the expected memory
2595 * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
2596 *
2597 * Returns the number of pages that were populated.
2598 */
2599 typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2600 struct page *page, void *opaque);
2601
2602 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
2603 kvm_gmem_populate_cb post_populate, void *opaque);
2604 #endif
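/*
 * Illustrative sketch of a @post_populate callback (everything except the
 * typedef'd signature is hypothetical); a CoCo arch might use it to encrypt
 * and measure each freshly populated page before it becomes guest-private:
 *
 *	static int example_post_populate(struct kvm *kvm, gfn_t gfn,
 *					 kvm_pfn_t pfn, struct page *page,
 *					 void *opaque)
 *	{
 *		struct example_launch_ctx *ctx = opaque;
 *
 *		return example_encrypt_and_measure(ctx, gfn, pfn);
 *	}
 *
 *	ret = kvm_gmem_populate(kvm, start_gfn, src, npages,
 *				example_post_populate, ctx);
 */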
2605
2606 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
2607 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
2608 #endif
2609
2610 #ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
2611 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
2612 struct kvm_pre_fault_memory *range);
2613 #endif
2614
2615 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
2616 int kvm_enable_virtualization(void);
2617 void kvm_disable_virtualization(void);
2618 #else
2619 static inline int kvm_enable_virtualization(void) { return 0; }
2620 static inline void kvm_disable_virtualization(void) { }
2621 #endif
2622
2623 #endif
2624