1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This header defines architecture specific interfaces, x86 version
6 */
7
8 #ifndef _ASM_X86_KVM_HOST_H
9 #define _ASM_X86_KVM_HOST_H
10
11 #include <linux/types.h>
12 #include <linux/mm.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/tracepoint.h>
15 #include <linux/cpumask.h>
16 #include <linux/irq_work.h>
17 #include <linux/irq.h>
18 #include <linux/workqueue.h>
19
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
22 #include <linux/kvm_types.h>
23 #include <linux/perf_event.h>
24 #include <linux/pvclock_gtod.h>
25 #include <linux/clocksource.h>
26 #include <linux/irqbypass.h>
27 #include <linux/kfifo.h>
28 #include <linux/sched/vhost_task.h>
29 #include <linux/call_once.h>
30 #include <linux/atomic.h>
31
32 #include <asm/apic.h>
33 #include <asm/pvclock-abi.h>
34 #include <asm/debugreg.h>
35 #include <asm/desc.h>
36 #include <asm/mtrr.h>
37 #include <asm/msr-index.h>
38 #include <asm/msr.h>
39 #include <asm/asm.h>
40 #include <asm/irq_remapping.h>
41 #include <asm/kvm_page_track.h>
42 #include <asm/kvm_vcpu_regs.h>
43 #include <asm/virt.h>
44
45 #include <hyperv/hvhdk.h>
46
47 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
48
49 /*
50 * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if
51 * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
52 */
53 #ifdef CONFIG_KVM_MAX_NR_VCPUS
54 #define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS
55 #else
56 #define KVM_MAX_VCPUS 1024
57 #endif
58
59 /*
60 * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
61 * might be larger than the actual number of VCPUs because the
62 * APIC ID encodes CPU topology information.
63 *
64 * In the worst case, we'll need less than one extra bit for the
65 * Core ID, and less than one extra bit for the Package (Die) ID,
66 * so a ratio of 4 should be enough.
67 */
68 #define KVM_VCPU_ID_RATIO 4
69 #define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
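/*
 * For example, with the fallback KVM_MAX_VCPUS of 1024 above, this yields
 * KVM_MAX_VCPU_IDS == 4096, i.e. room for the extra Core/Package bits an
 * APIC ID may carry on top of the raw vCPU count.
 */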
70
71 /* memory slots that are not exposed to userspace */
72 #define KVM_INTERNAL_MEM_SLOTS 3
73
74 #define KVM_HALT_POLL_NS_DEFAULT 200000
75
76 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
77
78 #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
79 KVM_DIRTY_LOG_INITIALLY_SET)
80
81 #define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
82 KVM_BUS_LOCK_DETECTION_EXIT)
83
84 #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
85 KVM_X86_NOTIFY_VMEXIT_USER)
86
87 /* x86-specific vcpu->requests bit members */
88 #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
89 #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
90 #define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
91 #define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
92 #define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
93 #define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
94 #define KVM_REQ_EVENT KVM_ARCH_REQ(6)
95 #define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
96 #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
97 #define KVM_REQ_NMI KVM_ARCH_REQ(9)
98 #define KVM_REQ_PMU KVM_ARCH_REQ(10)
99 #define KVM_REQ_PMI KVM_ARCH_REQ(11)
100 #ifdef CONFIG_KVM_SMM
101 #define KVM_REQ_SMI KVM_ARCH_REQ(12)
102 #endif
103 #define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13)
104 #define KVM_REQ_MCLOCK_INPROGRESS \
105 KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
106 #define KVM_REQ_SCAN_IOAPIC \
107 KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
108 #define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
109 #define KVM_REQ_APIC_PAGE_RELOAD \
110 KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
111 #define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
112 #define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19)
113 #define KVM_REQ_HV_RESET KVM_ARCH_REQ(20)
114 #define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
115 #define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
116 #define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
117 #define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
118 #define KVM_REQ_APICV_UPDATE \
119 KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
120 #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
121 #define KVM_REQ_TLB_FLUSH_GUEST \
122 KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
123 #define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
124 #define KVM_REQ_RECALC_INTERCEPTS KVM_ARCH_REQ(29)
125 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
126 KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
127 #define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
128 KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
129 #define KVM_REQ_HV_TLB_FLUSH \
130 KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
131 #define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE \
132 KVM_ARCH_REQ_FLAGS(34, KVM_REQUEST_WAIT)
133
134 #define CR0_RESERVED_BITS \
135 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
136 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
137 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
138
139 #define CR4_RESERVED_BITS \
140 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
141 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
142 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
143 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
144 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
145 | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
146 | X86_CR4_LAM_SUP | X86_CR4_CET))
147
148 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
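/*
 * Note: assuming X86_CR8_TPR covers bits 3:0, CR8_RESERVED_BITS evaluates to
 * ~0xfUL, i.e. only the 4-bit Task Priority field of CR8 may be set; all
 * other bits are treated as reserved.
 */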
149
150
151
152 #define INVALID_PAGE (~(hpa_t)0)
153 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
154
155 /* KVM Hugepage definitions for x86 */
156 #define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
157 #define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
158 #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
159 #define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
160 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
161 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
162 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
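/*
 * Worked example (derived from the macros above): for a 2MiB page,
 * x == PG_LEVEL_2M == 2, so KVM_HPAGE_GFN_SHIFT(2) == 9,
 * KVM_HPAGE_SHIFT(2) == PAGE_SHIFT + 9 == 21, KVM_HPAGE_SIZE(2) == 2MiB and
 * KVM_PAGES_PER_HPAGE(2) == 512.  For 1GiB pages (PG_LEVEL_1G == 3) the
 * shift is 30 and KVM_PAGES_PER_HPAGE(3) == 512 * 512.
 */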
163
164 #define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
165 #define KVM_MIN_ALLOC_MMU_PAGES 64UL
166 #define KVM_MMU_HASH_SHIFT 12
167 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
168 #define KVM_MIN_FREE_MMU_PAGES 5
169 #define KVM_REFILL_PAGES 25
170 #define KVM_MAX_CPUID_ENTRIES 256
171 #define KVM_NR_VAR_MTRR 8
172
173 #define ASYNC_PF_PER_VCPU 64
174
175 enum kvm_reg {
176 VCPU_REGS_RAX = __VCPU_REGS_RAX,
177 VCPU_REGS_RCX = __VCPU_REGS_RCX,
178 VCPU_REGS_RDX = __VCPU_REGS_RDX,
179 VCPU_REGS_RBX = __VCPU_REGS_RBX,
180 VCPU_REGS_RSP = __VCPU_REGS_RSP,
181 VCPU_REGS_RBP = __VCPU_REGS_RBP,
182 VCPU_REGS_RSI = __VCPU_REGS_RSI,
183 VCPU_REGS_RDI = __VCPU_REGS_RDI,
184 #ifdef CONFIG_X86_64
185 VCPU_REGS_R8 = __VCPU_REGS_R8,
186 VCPU_REGS_R9 = __VCPU_REGS_R9,
187 VCPU_REGS_R10 = __VCPU_REGS_R10,
188 VCPU_REGS_R11 = __VCPU_REGS_R11,
189 VCPU_REGS_R12 = __VCPU_REGS_R12,
190 VCPU_REGS_R13 = __VCPU_REGS_R13,
191 VCPU_REGS_R14 = __VCPU_REGS_R14,
192 VCPU_REGS_R15 = __VCPU_REGS_R15,
193 #endif
194 VCPU_REGS_RIP,
195 NR_VCPU_REGS,
196
197 VCPU_EXREG_PDPTR = NR_VCPU_REGS,
198 VCPU_EXREG_CR0,
199 /*
200 * Alias AMD's ERAPS (not a real register) to CR3 so that common code
201 * can trigger emulation of the RAP (Return Address Predictor) with
202 * minimal support required in common code. Piggyback CR3 as the RAP
203 * is cleared on writes to CR3, i.e. marking CR3 dirty will naturally
204 * mark ERAPS dirty as well.
205 */
206 VCPU_EXREG_CR3,
207 VCPU_EXREG_ERAPS = VCPU_EXREG_CR3,
208 VCPU_EXREG_CR4,
209 VCPU_EXREG_RFLAGS,
210 VCPU_EXREG_SEGMENTS,
211 VCPU_EXREG_EXIT_INFO_1,
212 VCPU_EXREG_EXIT_INFO_2,
213 };
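/*
 * Illustrative sketch (not part of this header): guest GPR state is cached
 * lazily, so reads/writes of the enum kvm_reg GPRs are expected to go through
 * the kvm_register_read()/kvm_register_write() style accessors (see the
 * comment on kvm_vcpu_arch.regs further down), which honor the
 * regs_avail/regs_dirty bookkeeping, e.g.:
 *
 *	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 *	kvm_register_write(vcpu, VCPU_REGS_RAX, val + 1);
 */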
214
215 enum {
216 VCPU_SREG_ES,
217 VCPU_SREG_CS,
218 VCPU_SREG_SS,
219 VCPU_SREG_DS,
220 VCPU_SREG_FS,
221 VCPU_SREG_GS,
222 VCPU_SREG_TR,
223 VCPU_SREG_LDTR,
224 };
225
226 enum exit_fastpath_completion {
227 EXIT_FASTPATH_NONE,
228 EXIT_FASTPATH_REENTER_GUEST,
229 EXIT_FASTPATH_EXIT_HANDLED,
230 EXIT_FASTPATH_EXIT_USERSPACE,
231 };
232 typedef enum exit_fastpath_completion fastpath_t;
233
234 struct x86_emulate_ctxt;
235 struct x86_exception;
236 union kvm_smram;
237 enum x86_intercept;
238 enum x86_intercept_stage;
239
240 #define KVM_NR_DB_REGS 4
241
242 #define DR6_BUS_LOCK (1 << 11)
243 #define DR6_BD (1 << 13)
244 #define DR6_BS (1 << 14)
245 #define DR6_BT (1 << 15)
246 #define DR6_RTM (1 << 16)
247 /*
248 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
249 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
250 * they will never be 0 for now, but when they are defined
251 * in the future it will require no code change.
252 *
253 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
254 */
255 #define DR6_ACTIVE_LOW 0xffff0ff0
256 #define DR6_VOLATILE 0x0001e80f
257 #define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
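/*
 * For reference, evaluating the definitions above gives
 * DR6_FIXED_1 == 0xffff0ff0 & ~0x0001e80f == 0xfffe07f0, i.e. DR6_ACTIVE_LOW
 * with the volatile status bits (B0-B3, DR6_BUS_LOCK, BD, BS, BT and RTM)
 * cleared.
 */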
258
259 #define DR7_BP_EN_MASK 0x000000ff
260 #define DR7_GE (1 << 9)
261 #define DR7_GD (1 << 13)
262 #define DR7_VOLATILE 0xffff2bff
263
264 #define KVM_GUESTDBG_VALID_MASK \
265 (KVM_GUESTDBG_ENABLE | \
266 KVM_GUESTDBG_SINGLESTEP | \
267 KVM_GUESTDBG_USE_HW_BP | \
268 KVM_GUESTDBG_USE_SW_BP | \
269 KVM_GUESTDBG_INJECT_BP | \
270 KVM_GUESTDBG_INJECT_DB | \
271 KVM_GUESTDBG_BLOCKIRQ)
272
273 #define PFERR_PRESENT_MASK BIT(0)
274 #define PFERR_WRITE_MASK BIT(1)
275 #define PFERR_USER_MASK BIT(2)
276 #define PFERR_RSVD_MASK BIT(3)
277 #define PFERR_FETCH_MASK BIT(4)
278 #define PFERR_PK_MASK BIT(5)
279 #define PFERR_SS_MASK BIT(6)
280 #define PFERR_SGX_MASK BIT(15)
281 #define PFERR_GUEST_RMP_MASK BIT_ULL(31)
282 #define PFERR_GUEST_FINAL_MASK BIT_ULL(32)
283 #define PFERR_GUEST_PAGE_MASK BIT_ULL(33)
284 #define PFERR_GUEST_ENC_MASK BIT_ULL(34)
285 #define PFERR_GUEST_SIZEM_MASK BIT_ULL(35)
286 #define PFERR_GUEST_VMPL_MASK BIT_ULL(36)
287
288 /*
289 * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
290 * when emulating instructions that trigger an implicit access.
291 */
292 #define PFERR_IMPLICIT_ACCESS BIT_ULL(48)
293 /*
294 * PRIVATE_ACCESS is a KVM-defined flag used to indicate that a fault occurred
295 * when the guest was accessing private memory.
296 */
297 #define PFERR_PRIVATE_ACCESS BIT_ULL(49)
298 #define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
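/*
 * Note that the two KVM-defined flags above live in bits 48-49, well above
 * the hardware-defined error code bits (the highest one used here is bit 36),
 * so they cannot collide with an error code reported by hardware;
 * PFERR_SYNTHETIC_MASK covers exactly these KVM-internal bits.
 */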
299
300 /* apic attention bits */
301 #define KVM_APIC_CHECK_VAPIC 0
302 /*
303 * The following bit is set with PV-EOI, unset on EOI.
304 * We detect PV-EOI changes by the guest by comparing
305 * this bit with PV-EOI in guest memory.
306 * See the implementation in apic_update_pv_eoi.
307 */
308 #define KVM_APIC_PV_EOI_PENDING 1
309
310 struct kvm_kernel_irqfd;
311 struct kvm_kernel_irq_routing_entry;
312
313 /*
314 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
315 * also includes TDP pages) to determine whether or not a page can be used in
316 * the given MMU context. This is a subset of the overall kvm_cpu_role to
317 * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
318 * allocating 2 bytes per gfn instead of 4 bytes per gfn.
319 *
320 * Upper-level shadow pages having gptes are tracked for write-protection via
321 * gfn_write_track. As above, gfn_write_track is a 16 bit counter, so KVM must
322 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
323 * otherwise gfn_write_track will overflow and explosions will ensue.
324 *
325 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
326 * cannot be reused. The ability to reuse a SP is tracked by its role, which
327 * incorporates various mode bits and properties of the SP. Roughly speaking,
328 * the number of unique SPs that can theoretically be created is 2^n, where n
329 * is the number of bits that are used to compute the role.
330 *
331 * But, even though there are 20 bits in the mask below, not all combinations
332 * of modes and flags are possible:
333 *
334 * - invalid shadow pages are not accounted, mirror pages are not shadowed,
335 * so the bits are effectively 18.
336 *
337 * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
338 * execonly and ad_disabled are only used for nested EPT which has
339 * has_4_byte_gpte=0. Therefore, 2 bits are always unused.
340 *
341 * - the 4 bits of level are effectively limited to the values 2/3/4/5,
342 * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
343 * paging has exactly one upper level, making level completely redundant
344 * when has_4_byte_gpte=1.
345 *
346 * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
347 * cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
348 *
349 * Therefore, the maximum number of possible upper-level shadow pages for a
350 * single gfn is a bit less than 2^13.
351 */
352 union kvm_mmu_page_role {
353 u32 word;
354 struct {
355 unsigned level:4;
356 unsigned has_4_byte_gpte:1;
357 unsigned quadrant:2;
358 unsigned direct:1;
359 unsigned access:3;
360 unsigned invalid:1;
361 unsigned efer_nx:1;
362 unsigned cr0_wp:1;
363 unsigned smep_andnot_wp:1;
364 unsigned smap_andnot_wp:1;
365 unsigned ad_disabled:1;
366 unsigned guest_mode:1;
367 unsigned passthrough:1;
368 unsigned is_mirror:1;
369 unsigned :4;
370
371 /*
372 * This is left at the top of the word so that
373 * kvm_memslots_for_spte_role can extract it with a
374 * simple shift. While there is room, give it a whole
375 * byte so it is also faster to load it from memory.
376 */
377 unsigned smm:8;
378 };
379 };
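/*
 * Illustrative note: with the layout above, the fields preceding smm
 * (including the 4 unused pad bits) occupy bits 23:0 and smm occupies bits
 * 31:24, so the smm byte can be pulled out with a single shift, e.g.
 * (role.word >> 24), which is what the comment above the smm field alludes to.
 */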
380
381 /*
382 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
383 * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
384 * including on nested transitions, if nothing in the full role changes then
385 * MMU re-configuration can be skipped. The @valid bit is set on first usage
386 * so that an all-zero structure is not treated as valid data.
387 *
388 * The properties that are tracked in the extended role but not the page role
389 * are for things that either (a) do not affect the validity of the shadow page
390 * or (b) are indirectly reflected in the shadow page's role. For example,
391 * CR4.PKE only affects permission checks for software walks of the guest page
392 * tables (because KVM doesn't support Protection Keys with shadow paging), and
393 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
394 *
395 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
396 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
397 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
398 * SMAP aware regardless of CR0.WP.
399 */
400 union kvm_mmu_extended_role {
401 u32 word;
402 struct {
403 unsigned int valid:1;
404 unsigned int execonly:1;
405 unsigned int cr4_pse:1;
406 unsigned int cr4_pke:1;
407 unsigned int cr4_smap:1;
408 unsigned int cr4_smep:1;
409 unsigned int cr4_la57:1;
410 unsigned int efer_lma:1;
411 };
412 };
413
414 union kvm_cpu_role {
415 u64 as_u64;
416 struct {
417 union kvm_mmu_page_role base;
418 union kvm_mmu_extended_role ext;
419 };
420 };
421
422 struct kvm_rmap_head {
423 atomic_long_t val;
424 };
425
426 struct kvm_pio_request {
427 unsigned long count;
428 int in;
429 int port;
430 int size;
431 };
432
433 #define PT64_ROOT_MAX_LEVEL 5
434
435 struct rsvd_bits_validate {
436 u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
437 u64 bad_mt_xwr;
438 };
439
440 struct kvm_mmu_root_info {
441 gpa_t pgd;
442 hpa_t hpa;
443 };
444
445 #define KVM_MMU_ROOT_INFO_INVALID \
446 ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
447
448 #define KVM_MMU_NUM_PREV_ROOTS 3
449
450 #define KVM_MMU_ROOT_CURRENT BIT(0)
451 #define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i)
452 #define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
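/*
 * Example: with KVM_MMU_NUM_PREV_ROOTS == 3, KVM_MMU_ROOT_CURRENT is BIT(0),
 * the previous roots are BIT(1)..BIT(3), and KVM_MMU_ROOTS_ALL == 0xf, i.e.
 * the current root plus all three cached previous roots.
 */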
453
454 #define KVM_HAVE_MMU_RWLOCK
455
456 struct kvm_mmu_page;
457 struct kvm_page_fault;
458
459 /*
460 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
461 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
462 * current mmu mode.
463 */
464 struct kvm_mmu {
465 unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
466 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
467 int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
468 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
469 struct x86_exception *fault);
470 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
471 gpa_t gva_or_gpa, u64 access,
472 struct x86_exception *exception);
473 int (*sync_spte)(struct kvm_vcpu *vcpu,
474 struct kvm_mmu_page *sp, int i);
475 struct kvm_mmu_root_info root;
476 hpa_t mirror_root_hpa;
477 union kvm_cpu_role cpu_role;
478 union kvm_mmu_page_role root_role;
479
480 /*
481 * The pkru_mask indicates if protection key checks are needed. It
482 * consists of 16 domains indexed by page fault error code bits [4:1],
483 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
484 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
485 */
486 u32 pkru_mask;
487
488 struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
489
490 /*
491 * Bitmap; bit set = permission fault
492 * Byte index: page fault error code [4:1]
493 * Bit index: pte permissions in ACC_* format
494 */
495 u8 permissions[16];
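/*
 * Conceptual sketch of how this table is consumed (the authoritative
 * check, which also folds in the PKRU handling, lives in
 * permission_fault() and is more involved):
 *
 *	index = (pfec >> 1) & 0xf;          // error code bits [4:1]
 *	fault = (permissions[index] >> pte_access) & 1;
 */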
496
497 u64 *pae_root;
498 u64 *pml4_root;
499 u64 *pml5_root;
500
501 /*
502 * Check for zero bits in shadow page table entries; these
503 * bits include not only hardware-reserved bits but also
504 * bits that SPTEs never use.
505 */
506 struct rsvd_bits_validate shadow_zero_check;
507
508 struct rsvd_bits_validate guest_rsvd_check;
509
510 u64 pdptrs[4]; /* pae */
511 };
512
513 enum pmc_type {
514 KVM_PMC_GP = 0,
515 KVM_PMC_FIXED,
516 };
517
518 struct kvm_pmc {
519 enum pmc_type type;
520 u8 idx;
521 bool is_paused;
522 bool intr;
523 /*
524 * Base value of the PMC counter, relative to the *consumed* count in
525 * the associated perf_event. This value includes counter updates from
526 * the perf_event and emulated_count since the last time the counter
527 * was reprogrammed, but it is *not* the current value as seen by the
528 * guest or userspace.
529 *
530 * The count is relative to the associated perf_event so that KVM
531 * doesn't need to reprogram the perf_event every time the guest writes
532 * to the counter.
533 */
534 u64 counter;
535 /*
536 * PMC events triggered by KVM emulation that haven't been fully
537 * processed, i.e. haven't undergone overflow detection.
538 */
539 u64 emulated_counter;
540 u64 eventsel;
541 u64 eventsel_hw;
542 struct perf_event *perf_event;
543 struct kvm_vcpu *vcpu;
544 /*
545 * Only used when creating or reusing a perf_event: holds the eventsel
546 * value for general purpose counters and the ctrl value for fixed
547 * counters.
548 */
549 u64 current_config;
550 };
551
552 /* More counters may conflict with other existing Architectural MSRs */
553 #define KVM_MAX(a, b) ((a) >= (b) ? (a) : (b))
554 #define KVM_MAX_NR_INTEL_GP_COUNTERS 8
555 #define KVM_MAX_NR_AMD_GP_COUNTERS 6
556 #define KVM_MAX_NR_GP_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \
557 KVM_MAX_NR_AMD_GP_COUNTERS)
558
559 #define KVM_MAX_NR_INTEL_FIXED_COUNTERS 3
560 #define KVM_MAX_NR_AMD_FIXED_COUNTERS 0
561 #define KVM_MAX_NR_FIXED_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUNTERS, \
562 KVM_MAX_NR_AMD_FIXED_COUNTERS)
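/*
 * With the values above this resolves to KVM_MAX_NR_GP_COUNTERS == 8 and
 * KVM_MAX_NR_FIXED_COUNTERS == 3, i.e. the per-vCPU counter arrays in
 * struct kvm_pmu below are sized for whichever vendor needs more of each
 * counter type.
 */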
563
564 struct kvm_pmu {
565 u8 version;
566 unsigned nr_arch_gp_counters;
567 unsigned nr_arch_fixed_counters;
568 unsigned available_event_types;
569 u64 fixed_ctr_ctrl;
570 u64 fixed_ctr_ctrl_hw;
571 u64 fixed_ctr_ctrl_rsvd;
572 u64 global_ctrl;
573 u64 global_status;
574 u64 counter_bitmask[2];
575 u64 global_ctrl_rsvd;
576 u64 global_status_rsvd;
577 u64 reserved_bits;
578 u64 raw_event_mask;
579 struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS];
580 struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUNTERS];
581
582 /*
583 * Overlay the bitmap with a 64-bit atomic so that all bits can be
584 * set in a single access, e.g. to reprogram all counters when the PMU
585 * filter changes.
586 */
587 union {
588 DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
589 atomic64_t __reprogram_pmi;
590 };
591 DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
592 DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
593
594 DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX);
595 DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX);
596
597 u64 ds_area;
598 u64 pebs_enable;
599 u64 pebs_enable_rsvd;
600 u64 pebs_data_cfg;
601 u64 pebs_data_cfg_rsvd;
602
603 /*
604 * If a guest counter is cross-mapped to a host counter with a different
605 * index, its PEBS capability will be temporarily disabled.
606 *
607 * The user should make sure that this mask is updated
608 * after disabling interrupts and before perf_guest_get_msrs();
609 */
610 u64 host_cross_mapped_mask;
611
612 /*
613 * Gate that ensures perf_events not marked in pmc_in_use are
614 * released only once in a vCPU time slice.
615 */
616 bool need_cleanup;
617
618 /*
619 * The total number of programmed perf_events; it helps to avoid a
620 * redundant check before cleanup if the guest doesn't use the vPMU at all.
621 */
622 u8 event_count;
623 };
624
625 struct kvm_pmu_ops;
626
627 enum {
628 KVM_DEBUGREG_BP_ENABLED = BIT(0),
629 KVM_DEBUGREG_WONT_EXIT = BIT(1),
630 /*
631 * Guest debug registers (DR0-3, DR6 and DR7) are saved/restored by
632 * hardware on exit from or entry to the guest. KVM needn't switch them.
633 * DR0-3, DR6 and DR7 are set to their architectural INIT value on VM
634 * exit, so host values need to be restored.
635 */
636 KVM_DEBUGREG_AUTO_SWITCH = BIT(2),
637 };
638
639 struct kvm_mtrr {
640 u64 var[KVM_NR_VAR_MTRR * 2];
641 u64 fixed_64k;
642 u64 fixed_16k[2];
643 u64 fixed_4k[8];
644 u64 deftype;
645 };
646
647 /* Hyper-V SynIC timer */
648 struct kvm_vcpu_hv_stimer {
649 struct hrtimer timer;
650 int index;
651 union hv_stimer_config config;
652 u64 count;
653 u64 exp_time;
654 struct hv_message msg;
655 bool msg_pending;
656 };
657
658 /* Hyper-V synthetic interrupt controller (SynIC)*/
659 struct kvm_vcpu_hv_synic {
660 u64 version;
661 u64 control;
662 u64 msg_page;
663 u64 evt_page;
664 atomic64_t sint[HV_SYNIC_SINT_COUNT];
665 atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
666 DECLARE_BITMAP(auto_eoi_bitmap, 256);
667 DECLARE_BITMAP(vec_bitmap, 256);
668 bool active;
669 bool dont_zero_synic_pages;
670 };
671
672 /* The maximum number of entries on the TLB flush fifo. */
673 #define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
674 /*
675 * Note: the following 'magic' entry is made up by KVM to avoid putting
676 * anything besides GVA on the TLB flush fifo. It is theoretically possible
677 * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
678 * which will look identical. KVM's action to 'flush everything' instead of
679 * flushing these particular addresses is, however, fully legitimate as
680 * flushing more than requested is always OK.
681 */
682 #define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1)
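/*
 * E.g. assuming the standard Hyper-V gva_list encoding (page-aligned GVA in
 * the upper bits, page count in the low 12 bits), the request described
 * above (GVA 0xfffffffffffff000, count 0xfff) encodes to (u64)-1, which is
 * exactly why it collides with the 'magic' flush-all entry.
 */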
683
684 enum hv_tlb_flush_fifos {
685 HV_L1_TLB_FLUSH_FIFO,
686 HV_L2_TLB_FLUSH_FIFO,
687 HV_NR_TLB_FLUSH_FIFOS,
688 };
689
690 struct kvm_vcpu_hv_tlb_flush_fifo {
691 spinlock_t write_lock;
692 DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
693 };
694
695 /* Hyper-V per vcpu emulation context */
696 struct kvm_vcpu_hv {
697 struct kvm_vcpu *vcpu;
698 u32 vp_index;
699 u64 hv_vapic;
700 s64 runtime_offset;
701 struct kvm_vcpu_hv_synic synic;
702 struct kvm_hyperv_exit exit;
703 struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
704 DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
705 bool enforce_cpuid;
706 struct {
707 u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
708 u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
709 u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
710 u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
711 u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
712 u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
713 u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
714 u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
715 } cpuid_cache;
716
717 struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];
718
719 /*
720 * Preallocated buffers for handling hypercalls that pass sparse vCPU
721 * sets (for high vCPU counts, they're too large to comfortably fit on
722 * the stack).
723 */
724 u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
725 DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
726
727 struct hv_vp_assist_page vp_assist_page;
728
729 struct {
730 u64 pa_page_gpa;
731 u64 vm_id;
732 u32 vp_id;
733 } nested;
734 };
735
736 struct kvm_hypervisor_cpuid {
737 u32 base;
738 u32 limit;
739 };
740
741 #ifdef CONFIG_KVM_XEN
742 /* Xen HVM per vcpu emulation context */
743 struct kvm_vcpu_xen {
744 u64 hypercall_rip;
745 u32 current_runstate;
746 u8 upcall_vector;
747 struct gfn_to_pfn_cache vcpu_info_cache;
748 struct gfn_to_pfn_cache vcpu_time_info_cache;
749 struct gfn_to_pfn_cache runstate_cache;
750 struct gfn_to_pfn_cache runstate2_cache;
751 u64 last_steal;
752 u64 runstate_entry_time;
753 u64 runstate_times[4];
754 unsigned long evtchn_pending_sel;
755 u32 vcpu_id; /* The Xen / ACPI vCPU ID */
756 u32 timer_virq;
757 u64 timer_expires; /* In guest epoch */
758 atomic_t timer_pending;
759 struct hrtimer timer;
760 int poll_evtchn;
761 struct timer_list poll_timer;
762 struct kvm_hypervisor_cpuid cpuid;
763 };
764 #endif
765
766 struct kvm_queued_exception {
767 bool pending;
768 bool injected;
769 bool has_error_code;
770 u8 vector;
771 u32 error_code;
772 unsigned long payload;
773 bool has_payload;
774 };
775
776 /*
777 * Hardware-defined CPUID leafs that are either scattered by the kernel or are
778 * unknown to the kernel, but need to be directly used by KVM. Note, these
779 * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
780 */
781 enum kvm_only_cpuid_leafs {
782 CPUID_12_EAX = NCAPINTS,
783 CPUID_7_1_EDX,
784 CPUID_8000_0007_EDX,
785 CPUID_8000_0022_EAX,
786 CPUID_7_2_EDX,
787 CPUID_24_0_EBX,
788 CPUID_8000_0021_ECX,
789 CPUID_7_1_ECX,
790 CPUID_1E_1_EAX,
791 CPUID_24_1_ECX,
792 NR_KVM_CPU_CAPS,
793
794 NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
795 };
796
797 struct kvm_vcpu_arch {
798 /*
799 * rip and regs accesses must go through
800 * kvm_{register,rip}_{read,write} functions.
801 */
802 unsigned long regs[NR_VCPU_REGS];
803 u32 regs_avail;
804 u32 regs_dirty;
805
806 unsigned long cr0;
807 unsigned long cr0_guest_owned_bits;
808 unsigned long cr2;
809 unsigned long cr3;
810 unsigned long cr4;
811 unsigned long cr4_guest_owned_bits;
812 unsigned long cr4_guest_rsvd_bits;
813 unsigned long cr8;
814 u32 host_pkru;
815 u32 pkru;
816 u32 hflags;
817 u64 efer;
818 u64 host_debugctl;
819 u64 apic_base;
820 struct kvm_lapic *apic; /* kernel irqchip context */
821 bool load_eoi_exitmap_pending;
822 DECLARE_BITMAP(ioapic_handled_vectors, 256);
823 unsigned long apic_attention;
824 int32_t apic_arb_prio;
825 int mp_state;
826 u64 ia32_misc_enable_msr;
827 u64 smbase;
828 u64 smi_count;
829 bool at_instruction_boundary;
830 bool tpr_access_reporting;
831 bool xfd_no_write_intercept;
832 u64 microcode_version;
833 u64 arch_capabilities;
834 u64 perf_capabilities;
835
836 /*
837 * Paging state of the vcpu
838 *
839 * If the vcpu runs in guest mode with two level paging this still saves
840 * the paging mode of the l1 guest. This context is always used to
841 * handle faults.
842 */
843 struct kvm_mmu *mmu;
844
845 /* Non-nested MMU for L1 */
846 struct kvm_mmu root_mmu;
847
848 /* L1 MMU when running nested */
849 struct kvm_mmu guest_mmu;
850
851 /*
852 * Paging state of an L2 guest (used for nested npt)
853 *
854 * This context will save all necessary information to walk page tables
855 * of an L2 guest. This context is only initialized for page table
856 * walking and not for faulting since we never handle l2 page faults on
857 * the host.
858 */
859 struct kvm_mmu nested_mmu;
860
861 /*
862 * Pointer to the mmu context currently used for
863 * gva_to_gpa translations.
864 */
865 struct kvm_mmu *walk_mmu;
866
867 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
868 struct kvm_mmu_memory_cache mmu_shadow_page_cache;
869 struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
870 struct kvm_mmu_memory_cache mmu_page_header_cache;
871 /*
872 * This cache is used to allocate external page tables, e.g. the private
873 * EPT used by the TDX module.
874 */
875 struct kvm_mmu_memory_cache mmu_external_spt_cache;
876
877 /*
878 * QEMU userspace and the guest each have their own FPU state.
879 * In vcpu_run, we switch between the user and guest FPU contexts.
880 * While running a VCPU, the VCPU thread will have the guest FPU
881 * context.
882 *
883 * Note that while the PKRU state lives inside the fpu registers,
884 * it is switched out separately at VMENTER and VMEXIT time. The
885 * "guest_fpstate" state here contains the guest FPU context, with the
886 * host PKRU bits.
887 */
888 struct fpu_guest guest_fpu;
889
890 u64 xcr0;
891 u64 guest_supported_xcr0;
892 u64 ia32_xss;
893 u64 guest_supported_xss;
894
895 struct kvm_pio_request pio;
896 void *pio_data;
897 void *sev_pio_data;
898 unsigned sev_pio_count;
899
900 u8 event_exit_inst_len;
901
902 bool exception_from_userspace;
903
904 /* Exceptions to be injected to the guest. */
905 struct kvm_queued_exception exception;
906 /* Exception VM-Exits to be synthesized to L1. */
907 struct kvm_queued_exception exception_vmexit;
908
909 struct kvm_queued_interrupt {
910 bool injected;
911 bool soft;
912 u8 nr;
913 } interrupt;
914
915 int halt_request; /* real mode on Intel only */
916
917 int cpuid_nent;
918 struct kvm_cpuid_entry2 *cpuid_entries;
919 bool cpuid_dynamic_bits_dirty;
920 bool is_amd_compatible;
921
922 /*
923 * cpu_caps holds the effective guest capabilities, i.e. the features
924 * the vCPU is allowed to use. Typically, but not always, features can
925 * be used by the guest if and only if both KVM and userspace want to
926 * expose the feature to the guest.
927 *
928 * A common exception is for virtualization holes, i.e. when KVM can't
929 * prevent the guest from using a feature, in which case the vCPU "has"
930 * the feature regardless of what KVM or userspace desires.
931 *
932 * Note, features that don't require KVM involvement in any way are
933 * NOT enforced/sanitized by KVM, i.e. are taken verbatim from the
934 * guest CPUID provided by userspace.
935 */
936 u32 cpu_caps[NR_KVM_CPU_CAPS];
937
938 u64 reserved_gpa_bits;
939 int maxphyaddr;
940
941 /* emulate context */
942
943 struct x86_emulate_ctxt *emulate_ctxt;
944 bool emulate_regs_need_sync_to_vcpu;
945 bool emulate_regs_need_sync_from_vcpu;
946 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
947 unsigned long cui_linear_rip;
948 int cui_rdmsr_imm_reg;
949
950 gpa_t time;
951 s8 pvclock_tsc_shift;
952 u32 pvclock_tsc_mul;
953 unsigned int hw_tsc_khz;
954 struct gfn_to_pfn_cache pv_time;
955 /* set guest stopped flag in pvclock flags field */
956 bool pvclock_set_guest_stopped_request;
957
958 struct {
959 u8 preempted;
960 u64 msr_val;
961 u64 last_steal;
962 struct gfn_to_hva_cache cache;
963 } st;
964
965 u64 l1_tsc_offset;
966 u64 tsc_offset; /* current tsc offset */
967 u64 last_guest_tsc;
968 u64 last_host_tsc;
969 u64 tsc_offset_adjustment;
970 u64 this_tsc_nsec;
971 u64 this_tsc_write;
972 u64 this_tsc_generation;
973 bool tsc_catchup;
974 bool tsc_always_catchup;
975 s8 virtual_tsc_shift;
976 u32 virtual_tsc_mult;
977 u32 virtual_tsc_khz;
978 s64 ia32_tsc_adjust_msr;
979 u64 msr_ia32_power_ctl;
980 u64 l1_tsc_scaling_ratio;
981 u64 tsc_scaling_ratio; /* current scaling ratio */
982
983 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
984 /* Number of NMIs pending injection, not including hardware vNMIs. */
985 unsigned int nmi_pending;
986 bool nmi_injected; /* Trying to inject an NMI this entry */
987 bool smi_pending; /* SMI queued after currently running handler */
988 u8 handling_intr_from_guest;
989
990 struct kvm_mtrr mtrr_state;
991 u64 pat;
992
993 unsigned switch_db_regs;
994 unsigned long db[KVM_NR_DB_REGS];
995 unsigned long dr6;
996 unsigned long dr7;
997 unsigned long eff_db[KVM_NR_DB_REGS];
998 unsigned long guest_debug_dr7;
999 u64 msr_platform_info;
1000 u64 msr_misc_features_enables;
1001
1002 u64 mcg_cap;
1003 u64 mcg_status;
1004 u64 mcg_ctl;
1005 u64 mcg_ext_ctl;
1006 u64 *mce_banks;
1007 u64 *mci_ctl2_banks;
1008
1009 /* Cache MMIO info */
1010 u64 mmio_gva;
1011 unsigned mmio_access;
1012 gfn_t mmio_gfn;
1013 u64 mmio_gen;
1014
1015 struct kvm_pmu pmu;
1016
1017 /* used for guest single stepping over the given code position */
1018 unsigned long singlestep_rip;
1019
1020 #ifdef CONFIG_KVM_HYPERV
1021 bool hyperv_enabled;
1022 struct kvm_vcpu_hv *hyperv;
1023 #endif
1024 #ifdef CONFIG_KVM_XEN
1025 struct kvm_vcpu_xen xen;
1026 #endif
1027 cpumask_var_t wbinvd_dirty_mask;
1028
1029 unsigned long last_retry_eip;
1030 unsigned long last_retry_addr;
1031
1032 struct {
1033 bool halted;
1034 gfn_t gfns[ASYNC_PF_PER_VCPU];
1035 struct gfn_to_hva_cache data;
1036 u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
1037 u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
1038 u16 vec;
1039 u32 id;
1040 u32 host_apf_flags;
1041 bool send_always;
1042 bool delivery_as_pf_vmexit;
1043 bool pageready_pending;
1044 } apf;
1045
1046 /* OSVW MSRs (AMD only) */
1047 struct {
1048 u64 length;
1049 u64 status;
1050 } osvw;
1051
1052 struct {
1053 u64 msr_val;
1054 struct gfn_to_hva_cache data;
1055 } pv_eoi;
1056
1057 u64 msr_kvm_poll_control;
1058
1059 /* pv related host specific info */
1060 struct {
1061 bool pv_unhalted;
1062 } pv;
1063
1064 int pending_ioapic_eoi;
1065 int pending_external_vector;
1066 int highest_stale_pending_ioapic_eoi;
1067
1068 /* was preempted while running in kernel mode (CPL 0) */
1069 bool preempted_in_kernel;
1070
1071 /* Host CPU on which VM-entry was most recently attempted */
1072 int last_vmentry_cpu;
1073
1074 /* AMD MSRC001_0015 Hardware Configuration */
1075 u64 msr_hwcr;
1076
1077 /* pv related cpuid info */
1078 struct {
1079 /*
1080 * value of the eax register in the KVM_CPUID_FEATURES CPUID
1081 * leaf.
1082 */
1083 u32 features;
1084
1085 /*
1086 * indicates whether pv emulation should be disabled if features
1087 * are not present in the guest's cpuid
1088 */
1089 bool enforce;
1090 } pv_cpuid;
1091
1092 /* Protected Guests */
1093 bool guest_state_protected;
1094 bool guest_tsc_protected;
1095
1096 /*
1097 * Set when the PDPTRs were loaded directly by userspace without
1098 * reading guest memory.
1099 */
1100 bool pdptrs_from_userspace;
1101
1102 /*
1103 * Set if an emulated nested VM-Enter to L2 is pending completion. KVM
1104 * must not synthesize a VM-Exit to L1 before entering L2, as VM-Exits
1105 * can only occur at instruction boundaries. The only exception is
1106 * VMX's "notify" exits, which exist in large part to break the CPU out
1107 * of infinite ucode loops, but can corrupt vCPU state in the process!
1108 *
1109 * For all intents and purposes, this is a boolean, but it's tracked as
1110 * a u8 so that KVM can detect when userspace may have stuffed vCPU
1111 * state and generated an architecturally-impossible VM-Exit.
1112 */
1113 #define KVM_NESTED_RUN_PENDING 1
1114 #define KVM_NESTED_RUN_PENDING_UNTRUSTED 2
1115 u8 nested_run_pending;
1116
1117 #if IS_ENABLED(CONFIG_HYPERV)
1118 hpa_t hv_root_tdp;
1119 #endif
1120 };
1121
1122 struct kvm_lpage_info {
1123 int disallow_lpage;
1124 };
1125
1126 struct kvm_arch_memory_slot {
1127 struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
1128 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
1129 unsigned short *gfn_write_track;
1130 };
1131
1132 /*
1133 * Track the mode of the optimized logical map, as the rules for decoding the
1134 * destination vary per mode. Enabling the optimized logical map requires all
1135 * software-enabled local APICs to be in the same mode, each addressable APIC to
1136 * be mapped to only one MDA, and each MDA to map to at most one APIC.
1137 */
1138 enum kvm_apic_logical_mode {
1139 /* All local APICs are software disabled. */
1140 KVM_APIC_MODE_SW_DISABLED,
1141 /* All software enabled local APICs in xAPIC cluster addressing mode. */
1142 KVM_APIC_MODE_XAPIC_CLUSTER,
1143 /* All software enabled local APICs in xAPIC flat addressing mode. */
1144 KVM_APIC_MODE_XAPIC_FLAT,
1145 /* All software enabled local APICs in x2APIC mode. */
1146 KVM_APIC_MODE_X2APIC,
1147 /*
1148 * Optimized map disabled, e.g. not all local APICs in the same logical
1149 * mode, same logical ID assigned to multiple APICs, etc.
1150 */
1151 KVM_APIC_MODE_MAP_DISABLED,
1152 };
1153
1154 struct kvm_apic_map {
1155 struct rcu_head rcu;
1156 enum kvm_apic_logical_mode logical_mode;
1157 u32 max_apic_id;
1158 union {
1159 struct kvm_lapic *xapic_flat_map[8];
1160 struct kvm_lapic *xapic_cluster_map[16][4];
1161 };
1162 struct kvm_lapic *phys_map[];
1163 };
1164
1165 /* Hyper-V synthetic debugger (SynDbg)*/
1166 struct kvm_hv_syndbg {
1167 struct {
1168 u64 control;
1169 u64 status;
1170 u64 send_page;
1171 u64 recv_page;
1172 u64 pending_page;
1173 } control;
1174 u64 options;
1175 };
1176
1177 /* Current state of Hyper-V TSC page clocksource */
1178 enum hv_tsc_page_status {
1179 /* TSC page was not set up or disabled */
1180 HV_TSC_PAGE_UNSET = 0,
1181 /* TSC page MSR was written by the guest, update pending */
1182 HV_TSC_PAGE_GUEST_CHANGED,
1183 /* TSC page update was triggered from the host side */
1184 HV_TSC_PAGE_HOST_CHANGED,
1185 /* TSC page was properly set up and is currently active */
1186 HV_TSC_PAGE_SET,
1187 /* TSC page was set up with an inaccessible GPA */
1188 HV_TSC_PAGE_BROKEN,
1189 };
1190
1191 #ifdef CONFIG_KVM_HYPERV
1192 /* Hyper-V emulation context */
1193 struct kvm_hv {
1194 struct mutex hv_lock;
1195 u64 hv_guest_os_id;
1196 u64 hv_hypercall;
1197 u64 hv_tsc_page;
1198 enum hv_tsc_page_status hv_tsc_page_status;
1199
1200 /* Hyper-V based guest crash (NT kernel bugcheck) parameters */
1201 u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
1202 u64 hv_crash_ctl;
1203
1204 struct ms_hyperv_tsc_page tsc_ref;
1205
1206 struct idr conn_to_evt;
1207
1208 u64 hv_reenlightenment_control;
1209 u64 hv_tsc_emulation_control;
1210 u64 hv_tsc_emulation_status;
1211 u64 hv_invtsc_control;
1212
1213 /* How many vCPUs have VP index != vCPU index */
1214 atomic_t num_mismatched_vp_indexes;
1215
1216 /*
1217 * How many SynICs use 'AutoEOI' feature
1218 * (protected by arch.apicv_update_lock)
1219 */
1220 unsigned int synic_auto_eoi_used;
1221
1222 struct kvm_hv_syndbg hv_syndbg;
1223
1224 bool xsaves_xsavec_checked;
1225 };
1226 #endif
1227
1228 struct msr_bitmap_range {
1229 u32 flags;
1230 u32 nmsrs;
1231 u32 base;
1232 unsigned long *bitmap;
1233 };
1234
1235 #ifdef CONFIG_KVM_XEN
1236 /* Xen emulation context */
1237 struct kvm_xen {
1238 struct mutex xen_lock;
1239 u32 xen_version;
1240 bool long_mode;
1241 bool runstate_update_flag;
1242 u8 upcall_vector;
1243 struct gfn_to_pfn_cache shinfo_cache;
1244 struct idr evtchn_ports;
1245 unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
1246
1247 struct kvm_xen_hvm_config hvm_config;
1248 };
1249 #endif
1250
1251 enum kvm_irqchip_mode {
1252 KVM_IRQCHIP_NONE,
1253 #ifdef CONFIG_KVM_IOAPIC
1254 KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
1255 #endif
1256 KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
1257 };
1258
1259 enum kvm_suppress_eoi_broadcast_mode {
1260 KVM_SUPPRESS_EOI_BROADCAST_QUIRKED, /* Legacy behavior */
1261 KVM_SUPPRESS_EOI_BROADCAST_ENABLED, /* Enable Suppress EOI broadcast */
1262 KVM_SUPPRESS_EOI_BROADCAST_DISABLED /* Disable Suppress EOI broadcast */
1263 };
1264
1265 struct kvm_x86_msr_filter {
1266 u8 count;
1267 bool default_allow:1;
1268 struct msr_bitmap_range ranges[16];
1269 };
1270
1271 struct kvm_x86_pmu_event_filter {
1272 __u32 action;
1273 __u32 nevents;
1274 __u32 fixed_counter_bitmap;
1275 __u32 flags;
1276 __u32 nr_includes;
1277 __u32 nr_excludes;
1278 __u64 *includes;
1279 __u64 *excludes;
1280 __u64 events[] __counted_by(nevents);
1281 };
1282
1283 enum kvm_apicv_inhibit {
1284
1285 /********************************************************************/
1286 /* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
1287 /********************************************************************/
1288
1289 /*
1290 * APIC acceleration is disabled by a module parameter
1291 * and/or not supported in hardware.
1292 */
1293 APICV_INHIBIT_REASON_DISABLED,
1294
1295 /*
1296 * APIC acceleration is inhibited because AutoEOI feature is
1297 * being used by a HyperV guest.
1298 */
1299 APICV_INHIBIT_REASON_HYPERV,
1300
1301 /*
1302 * APIC acceleration is inhibited because userspace hasn't yet
1303 * enabled the kernel/split irqchip.
1304 */
1305 APICV_INHIBIT_REASON_ABSENT,
1306
1307 /* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
1308 * (out of band, debug measure of blocking all interrupts on this vCPU)
1309 * was enabled, to avoid AVIC/APICv bypassing it.
1310 */
1311 APICV_INHIBIT_REASON_BLOCKIRQ,
1312
1313 /*
1314 * APICv is disabled because not all vCPUs have a 1:1 mapping between
1315 * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack.
1316 */
1317 APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED,
1318
1319 /*
1320 * For simplicity, APIC acceleration is inhibited the
1321 * first time either the APIC ID or the APIC base is changed by the guest
1322 * from their reset values.
1323 */
1324 APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
1325 APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,
1326
1327 /******************************************************/
1328 /* INHIBITs that are relevant only to the AMD's AVIC. */
1329 /******************************************************/
1330
1331 /*
1332 * AVIC is inhibited on a vCPU because it runs a nested guest.
1333 *
1334 * This is needed because unlike APICv, the peers of this vCPU
1335 * cannot use the doorbell mechanism to signal interrupts via AVIC when
1336 * a vCPU runs nested.
1337 */
1338 APICV_INHIBIT_REASON_NESTED,
1339
1340 /*
1341 * On SVM, the wait for the IRQ window is implemented with pending vIRQ,
1342 * which cannot be injected when the AVIC is enabled, thus AVIC
1343 * is inhibited while KVM waits for IRQ window.
1344 */
1345 APICV_INHIBIT_REASON_IRQWIN,
1346
1347 /*
1348 * PIT (i8254) 're-inject' mode relies on the EOI intercept,
1349 * which AVIC doesn't support for edge triggered interrupts.
1350 */
1351 APICV_INHIBIT_REASON_PIT_REINJ,
1352
1353 /*
1354 * AVIC is disabled because SEV doesn't support it.
1355 */
1356 APICV_INHIBIT_REASON_SEV,
1357
1358 /*
1359 * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1
1360 * mapping between logical ID and vCPU.
1361 */
1362 APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,
1363
1364 /*
1365 * AVIC is disabled because the vCPU's APIC ID is beyond the max
1366 * supported by AVIC/x2AVIC, i.e. the vCPU is unaddressable.
1367 */
1368 APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG,
1369
1370 NR_APICV_INHIBIT_REASONS,
1371 };
1372
1373 #define __APICV_INHIBIT_REASON(reason) \
1374 { BIT(APICV_INHIBIT_REASON_##reason), #reason }
1375
1376 #define APICV_INHIBIT_REASONS \
1377 __APICV_INHIBIT_REASON(DISABLED), \
1378 __APICV_INHIBIT_REASON(HYPERV), \
1379 __APICV_INHIBIT_REASON(ABSENT), \
1380 __APICV_INHIBIT_REASON(BLOCKIRQ), \
1381 __APICV_INHIBIT_REASON(PHYSICAL_ID_ALIASED), \
1382 __APICV_INHIBIT_REASON(APIC_ID_MODIFIED), \
1383 __APICV_INHIBIT_REASON(APIC_BASE_MODIFIED), \
1384 __APICV_INHIBIT_REASON(NESTED), \
1385 __APICV_INHIBIT_REASON(IRQWIN), \
1386 __APICV_INHIBIT_REASON(PIT_REINJ), \
1387 __APICV_INHIBIT_REASON(SEV), \
1388 __APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED), \
1389 __APICV_INHIBIT_REASON(PHYSICAL_ID_TOO_BIG)
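/*
 * Note: the { mask, "name" } pairs generated above follow the flag-array
 * format understood by __print_flags(), presumably so that tracepoints can
 * decode apicv_inhibit_reasons symbolically.
 */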
1390
1391 struct kvm_possible_nx_huge_pages {
1392 /*
1393 * A list of kvm_mmu_page structs that, if zapped, could possibly be
1394 * replaced by an NX huge page. A shadow page is on this list if its
1395 * existence disallows an NX huge page (nx_huge_page_disallowed is set)
1396 * and there are no other conditions that prevent a huge page, e.g.
1397 * the backing host page is huge, dirty logging is not enabled for its
1398 * memslot, etc... Note, zapping shadow pages on this list doesn't
1399 * guarantee an NX huge page will be created in its stead, e.g. if the
1400 * guest attempts to execute from the region then KVM obviously can't
1401 * create an NX huge page (without hanging the guest).
1402 */
1403 struct list_head pages;
1404 u64 nr_pages;
1405 };
1406
1407 enum kvm_mmu_type {
1408 KVM_SHADOW_MMU,
1409 #ifdef CONFIG_X86_64
1410 KVM_TDP_MMU,
1411 #endif
1412 KVM_NR_MMU_TYPES,
1413 };
1414
1415 struct kvm_arch {
1416 unsigned long n_used_mmu_pages;
1417 unsigned long n_requested_mmu_pages;
1418 unsigned long n_max_mmu_pages;
1419 unsigned int indirect_shadow_pages;
1420 u8 mmu_valid_gen;
1421 u8 vm_type;
1422 bool has_private_mem;
1423 bool has_protected_state;
1424 bool has_protected_eoi;
1425 bool pre_fault_allowed;
1426 struct hlist_head *mmu_page_hash;
1427 struct list_head active_mmu_pages;
1428 struct kvm_possible_nx_huge_pages possible_nx_huge_pages[KVM_NR_MMU_TYPES];
1429 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1430 struct kvm_page_track_notifier_head track_notifier_head;
1431 #endif
1432 /*
1433 * Protects marking pages unsync during page faults, as TDP MMU page
1434 * faults only take mmu_lock for read. For simplicity, the unsync
1435 * pages lock is always taken when marking pages unsync regardless of
1436 * whether mmu_lock is held for read or write.
1437 */
1438 spinlock_t mmu_unsync_pages_lock;
1439
1440 u64 shadow_mmio_value;
1441
1442 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
1443 atomic_t noncoherent_dma_count;
1444 unsigned long nr_possible_bypass_irqs;
1445
1446 #ifdef CONFIG_KVM_IOAPIC
1447 struct kvm_pic *vpic;
1448 struct kvm_ioapic *vioapic;
1449 struct kvm_pit *vpit;
1450 #endif
1451 atomic_t vapics_in_nmi_mode;
1452
1453 struct mutex apic_map_lock;
1454 struct kvm_apic_map __rcu *apic_map;
1455 atomic_t apic_map_dirty;
1456
1457 bool apic_access_memslot_enabled;
1458 bool apic_access_memslot_inhibited;
1459
1460 /*
1461 * Force apicv_update_lock and apicv_nr_irq_window_req to reside in a
1462 * dedicated cacheline. They are write-mostly, whereas most everything
1463 * else in kvm_arch is read-mostly. Note that apicv_inhibit_reasons is
1464 * read-mostly: toggling VM-wide inhibits is rare; _checking_ for
1465 * inhibits is common.
1466 */
1467 ____cacheline_aligned
1468 /*
1469 * Protects apicv_inhibit_reasons and apicv_nr_irq_window_req (with an
1470 * asterisk, see kvm_inc_or_dec_irq_window_inhibit() for details).
1471 */
1472 struct rw_semaphore apicv_update_lock;
1473 atomic_t apicv_nr_irq_window_req;
1474 ____cacheline_aligned
1475
1476 unsigned long apicv_inhibit_reasons;
1477
1478 gpa_t wall_clock;
1479
1480 u64 disabled_exits;
1481
1482 s64 kvmclock_offset;
1483
1484 /*
1485 * This also protects nr_vcpus_matched_tsc which is read from a
1486 * preemption-disabled region, so it must be a raw spinlock.
1487 */
1488 raw_spinlock_t tsc_write_lock;
1489 u64 last_tsc_nsec;
1490 u64 last_tsc_write;
1491 u32 last_tsc_khz;
1492 u64 last_tsc_offset;
1493 u64 cur_tsc_nsec;
1494 u64 cur_tsc_write;
1495 u64 cur_tsc_offset;
1496 u64 cur_tsc_generation;
1497 int nr_vcpus_matched_tsc;
1498
1499 u32 default_tsc_khz;
1500 bool user_set_tsc;
1501 u64 apic_bus_cycle_ns;
1502
1503 seqcount_raw_spinlock_t pvclock_sc;
1504 bool use_master_clock;
1505 u64 master_kernel_ns;
1506 u64 master_cycle_now;
1507
1508 #ifdef CONFIG_KVM_HYPERV
1509 struct kvm_hv hyperv;
1510 #endif
1511
1512 #ifdef CONFIG_KVM_XEN
1513 struct kvm_xen xen;
1514 #endif
1515
1516 bool backwards_tsc_observed;
1517 bool boot_vcpu_runs_old_kvmclock;
1518 u32 bsp_vcpu_id;
1519
1520 u64 disabled_quirks;
1521
1522 enum kvm_irqchip_mode irqchip_mode;
1523 u8 nr_reserved_ioapic_pins;
1524
1525 bool disabled_lapic_found;
1526
1527 bool x2apic_format;
1528 bool x2apic_broadcast_quirk_disabled;
1529 enum kvm_suppress_eoi_broadcast_mode suppress_eoi_broadcast_mode;
1530
1531 bool has_mapped_host_mmio;
1532 bool guest_can_read_msr_platform_info;
1533 bool exception_payload_enabled;
1534
1535 bool triple_fault_event;
1536
1537 bool bus_lock_detection_enabled;
1538 bool enable_pmu;
1539 bool created_mediated_pmu;
1540
1541 u32 notify_window;
1542 u32 notify_vmexit_flags;
1543 /*
1544 * If exit_on_emulation_error is set, and the in-kernel instruction
1545 * emulator fails to emulate an instruction, allow userspace
1546 * the opportunity to look at it.
1547 */
1548 bool exit_on_emulation_error;
1549
1550 /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
1551 u32 user_space_msr_mask;
1552 struct kvm_x86_msr_filter __rcu *msr_filter;
1553
1554 u32 hypercall_exit_enabled;
1555
1556 /* Guest can access the SGX PROVISIONKEY. */
1557 bool sgx_provisioning_allowed;
1558
1559 struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
1560 struct vhost_task *nx_huge_page_recovery_thread;
1561 u64 nx_huge_page_last;
1562 struct once nx_once;
1563
1564 #ifdef CONFIG_X86_64
1565 #ifdef CONFIG_KVM_PROVE_MMU
1566 /*
1567 * The number of TDP MMU pages across all roots. Used only to sanity
1568 * check that KVM isn't leaking TDP MMU pages.
1569 */
1570 atomic64_t tdp_mmu_pages;
1571 #endif
1572
1573 /*
1574 * List of struct kvm_mmu_pages being used as roots.
1575 * All struct kvm_mmu_pages in the list should have
1576 * tdp_mmu_page set.
1577 *
1578 * For reads, this list is protected by:
1579 * RCU alone or
1580 * the MMU lock in read mode + RCU or
1581 * the MMU lock in write mode
1582 *
1583 * For writes, this list is protected by tdp_mmu_pages_lock; see
1584 * below for the details.
1585 *
1586 * Roots will remain in the list until their tdp_mmu_root_count
1587 * drops to zero, at which point the thread that decremented the
1588 * count to zero should remove the root from the list and clean
1589 * it up, freeing the root after an RCU grace period.
1590 */
1591 struct list_head tdp_mmu_roots;
1592
1593 /*
1594 * Protects accesses to the following fields when the MMU lock
1595 * is held in read mode:
1596 * - tdp_mmu_roots (above)
1597 * - the link field of kvm_mmu_page structs used by the TDP MMU
1598 * - possible_nx_huge_pages[KVM_TDP_MMU]
1599 * - the possible_nx_huge_page_link field of kvm_mmu_page structs used
1600 * by the TDP MMU
1601 * Because the lock is only taken within the MMU lock, strictly
1602 * speaking it is redundant to acquire this lock when the thread
1603 * holds the MMU lock in write mode. However it often simplifies
1604 * the code to do so.
1605 */
1606 spinlock_t tdp_mmu_pages_lock;
1607 #endif /* CONFIG_X86_64 */
1608
1609 /*
1610 * If set, at least one shadow root has been allocated. This flag
1611 * is used as one input when determining whether certain memslot
1612 * related allocations are necessary.
1613 */
1614 bool shadow_root_allocated;
1615
1616 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1617 /*
1618 * If set, the VM has (or had) an external write tracking user, and
1619 * thus all write tracking metadata has been allocated, even if KVM
1620 * itself isn't using write tracking.
1621 */
1622 bool external_write_tracking_enabled;
1623 #endif
1624
1625 #if IS_ENABLED(CONFIG_HYPERV)
1626 hpa_t hv_root_tdp;
1627 spinlock_t hv_root_tdp_lock;
1628 struct hv_partition_assist_pg *hv_pa_pg;
1629 #endif
1630 /*
1631 * VM-scope maximum vCPU ID. Used to determine the size of structures
1632 * that increase along with the maximum vCPU ID, in which case, using
1633 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
1634 */
1635 u32 max_vcpu_ids;
1636
1637 bool disable_nx_huge_pages;
1638
1639 /*
1640 * Memory caches used to allocate shadow pages when performing eager
1641 * page splitting. No need for a shadowed_info_cache since eager page
1642 * splitting only allocates direct shadow pages.
1643 *
1644 * Protected by kvm->slots_lock.
1645 */
1646 struct kvm_mmu_memory_cache split_shadow_page_cache;
1647 struct kvm_mmu_memory_cache split_page_header_cache;
1648
1649 /*
1650 * Memory cache used to allocate pte_list_desc structs while splitting
1651 * huge pages. In the worst case, to split one huge page, 512
1652 * pte_list_desc structs are needed to add each lower level leaf sptep
1653 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
1654 * page table.
1655 *
1656 * Protected by kvm->slots_lock.
1657 */
1658 #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
1659 struct kvm_mmu_memory_cache split_desc_cache;
1660
1661 gfn_t gfn_direct_bits;
1662
1663 /*
1664 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
1665 * value indicates that CPU dirty logging is unsupported or disabled for
1666 * the current VM.
1667 */
1668 int cpu_dirty_log_size;
1669 };
1670
1671 struct kvm_vm_stat {
1672 struct kvm_vm_stat_generic generic;
1673 u64 mmu_shadow_zapped;
1674 u64 mmu_pte_write;
1675 u64 mmu_pde_zapped;
1676 u64 mmu_flooded;
1677 u64 mmu_recycled;
1678 u64 mmu_cache_miss;
1679 u64 mmu_unsync;
1680 union {
1681 struct {
1682 atomic64_t pages_4k;
1683 atomic64_t pages_2m;
1684 atomic64_t pages_1g;
1685 };
1686 atomic64_t pages[KVM_NR_PAGE_SIZES];
1687 };
1688 u64 nx_lpage_splits;
1689 u64 max_mmu_page_hash_collisions;
1690 u64 max_mmu_rmap_size;
1691 };
1692
1693 struct kvm_vcpu_stat {
1694 struct kvm_vcpu_stat_generic generic;
1695 u64 pf_taken;
1696 u64 pf_fixed;
1697 u64 pf_emulate;
1698 u64 pf_spurious;
1699 u64 pf_fast;
1700 u64 pf_mmio_spte_created;
1701 u64 pf_guest;
1702 u64 tlb_flush;
1703 u64 invlpg;
1704
1705 u64 exits;
1706 u64 io_exits;
1707 u64 mmio_exits;
1708 u64 signal_exits;
1709 u64 irq_window_exits;
1710 u64 nmi_window_exits;
1711 u64 l1d_flush;
1712 u64 halt_exits;
1713 u64 request_irq_exits;
1714 u64 irq_exits;
1715 u64 host_state_reload;
1716 u64 fpu_reload;
1717 u64 insn_emulation;
1718 u64 insn_emulation_fail;
1719 u64 hypercalls;
1720 u64 irq_injections;
1721 u64 nmi_injections;
1722 u64 req_event;
1723 u64 nested_run;
1724 u64 directed_yield_attempted;
1725 u64 directed_yield_successful;
1726 u64 preemption_reported;
1727 u64 preemption_other;
1728 u64 guest_mode;
1729 u64 notify_window_exits;
1730 };
1731
1732 struct x86_instruction_info;
1733
1734 struct msr_data {
1735 bool host_initiated;
1736 u32 index;
1737 u64 data;
1738 };
1739
1740 struct kvm_lapic_irq {
1741 u32 vector;
1742 u16 delivery_mode;
1743 u16 dest_mode;
1744 bool level;
1745 u16 trig_mode;
1746 u32 shorthand;
1747 u32 dest_id;
1748 bool msi_redir_hint;
1749 };
1750
1751 static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
1752 {
1753 return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
1754 }
1755
1756 enum kvm_x86_run_flags {
1757 KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0),
1758 KVM_RUN_LOAD_GUEST_DR6 = BIT(1),
1759 KVM_RUN_LOAD_DEBUGCTL = BIT(2),
1760 };
1761
1762 struct kvm_x86_ops {
1763 const char *name;
1764
1765 int (*check_processor_compatibility)(void);
1766
1767 int (*enable_virtualization_cpu)(void);
1768 void (*disable_virtualization_cpu)(void);
1769 cpu_emergency_virt_cb *emergency_disable_virtualization_cpu;
1770
1771 void (*hardware_unsetup)(void);
1772 bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
1773 void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
1774
1775 unsigned int vm_size;
1776 int (*vm_init)(struct kvm *kvm);
1777 void (*vm_destroy)(struct kvm *kvm);
1778 void (*vm_pre_destroy)(struct kvm *kvm);
1779
1780 /* Create, but do not attach this VCPU */
1781 int (*vcpu_precreate)(struct kvm *kvm);
1782 int (*vcpu_create)(struct kvm_vcpu *vcpu);
1783 void (*vcpu_free)(struct kvm_vcpu *vcpu);
1784 void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
1785
1786 void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
1787 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
1788 void (*vcpu_put)(struct kvm_vcpu *vcpu);
1789
1790 /*
1791 * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to
1792 * match the host's value even while the guest is active.
1793 */
1794 const u64 HOST_OWNED_DEBUGCTL;
1795
1796 void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
1797 int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1798 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1799 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
1800 void (*get_segment)(struct kvm_vcpu *vcpu,
1801 struct kvm_segment *var, int seg);
1802 int (*get_cpl)(struct kvm_vcpu *vcpu);
1803 int (*get_cpl_no_cache)(struct kvm_vcpu *vcpu);
1804 void (*set_segment)(struct kvm_vcpu *vcpu,
1805 struct kvm_segment *var, int seg);
1806 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
1807 bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1808 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1809 void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
1810 bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1811 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1812 int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
1813 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1814 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1815 void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1816 void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1817 void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
1818 void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
1819 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
1820 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
1821 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
1822 bool (*get_if_flag)(struct kvm_vcpu *vcpu);
1823
1824 void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
1825 void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
1826 #if IS_ENABLED(CONFIG_HYPERV)
1827 int (*flush_remote_tlbs)(struct kvm *kvm);
1828 int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
1829 gfn_t nr_pages);
1830 #endif
1831
1832 /*
1833 * Flush any TLB entries associated with the given GVA.
1834 * Does not need to flush GPA->HPA mappings.
1835 * Can potentially get non-canonical addresses through INVLPGs, which
1836 * the implementation may choose to ignore if appropriate.
1837 */
1838 void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);
1839
1840 /*
1841 * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
1842 * does not need to flush GPA->HPA mappings.
1843 */
1844 void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
1845
1846 int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
1847 enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
1848 u64 run_flags);
1849 int (*handle_exit)(struct kvm_vcpu *vcpu,
1850 enum exit_fastpath_completion exit_fastpath);
1851 int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
1852 void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
1853 void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
1854 u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
1855 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
1856 unsigned char *hypercall_addr);
1857 void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
1858 void (*inject_nmi)(struct kvm_vcpu *vcpu);
1859 void (*inject_exception)(struct kvm_vcpu *vcpu);
1860 void (*cancel_injection)(struct kvm_vcpu *vcpu);
1861 int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1862 int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1863 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
1864 void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
1865 /* Whether or not a virtual NMI is pending in hardware. */
1866 bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
1867 /*
1868 * Attempt to pend a virtual NMI in hardware. Returns %true on success
1869 * to allow using static_call_ret0 as the fallback.
1870 */
1871 bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
1872 void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
1873 void (*enable_irq_window)(struct kvm_vcpu *vcpu);
1874 void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
1875
1876 const bool x2apic_icr_is_split;
1877 const unsigned long required_apicv_inhibits;
1878 bool allow_apicv_in_x2apic_without_x2apic_virtualization;
1879 void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
1880 void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
1881 void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
1882 void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
1883 void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
1884 void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
1885 int trig_mode, int vector);
1886 int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
1887 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
1888 int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
1889 u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1890
1891 void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
1892 int root_level);
1893
1894 /* Update external mapping with page table link. */
1895 int (*link_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1896 void *external_spt);
1897 /* Update the external page table when the corresponding mirror SPTE is set. */
1898 int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1899 u64 mirror_spte);
1900
1901 /* Update external page tables for page table about to be freed. */
1902 int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1903 void *external_spt);
1904
1905 /* Update the external page table when the corresponding mirror SPTE is removed, and flush the TLB. */
1906 void (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1907 u64 mirror_spte);
1908
1909 bool (*has_wbinvd_exit)(void);
1910
1911 u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
1912 u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
1913 void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
1914 void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
1915
1916 /*
1917 * Retrieve somewhat arbitrary exit/entry information. Intended to
1918 * be used only from within tracepoints or error paths.
1919 */
1920 void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
1921 u64 *info1, u64 *info2,
1922 u32 *intr_info, u32 *error_code);
1923
1924 void (*get_entry_info)(struct kvm_vcpu *vcpu,
1925 u32 *intr_info, u32 *error_code);
1926
1927 int (*check_intercept)(struct kvm_vcpu *vcpu,
1928 struct x86_instruction_info *info,
1929 enum x86_intercept_stage stage,
1930 struct x86_exception *exception);
1931 void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
1932
1933 void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
1934
1935 const struct kvm_x86_nested_ops *nested_ops;
1936
1937 void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
1938 void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
1939
1940 int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
1941 unsigned int host_irq, uint32_t guest_irq,
1942 struct kvm_vcpu *vcpu, u32 vector);
1943 void (*pi_start_bypass)(struct kvm *kvm);
1944 void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
1945 void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
1946 bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
1947 bool (*protected_apic_has_interrupt)(struct kvm_vcpu *vcpu);
1948
1949 int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
1950 bool *expired);
1951 void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
1952
1953 void (*setup_mce)(struct kvm_vcpu *vcpu);
1954
1955 #ifdef CONFIG_KVM_SMM
1956 int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1957 int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
1958 int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
1959 void (*enable_smi_window)(struct kvm_vcpu *vcpu);
1960 #endif
1961
1962 int (*dev_get_attr)(u32 group, u64 attr, u64 *val);
1963 int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
1964 int (*vcpu_mem_enc_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
1965 int (*vcpu_mem_enc_unlocked_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
1966 int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1967 int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1968 int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
1969 int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
1970 void (*guest_memory_reclaimed)(struct kvm *kvm);
1971
1972 int (*get_feature_msr)(u32 msr, u64 *data);
1973
1974 int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
1975 void *insn, int insn_len);
1976
1977 bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
1978 int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
1979
1980 void (*migrate_timers)(struct kvm_vcpu *vcpu);
1981 void (*recalc_intercepts)(struct kvm_vcpu *vcpu);
1982 int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
1983
1984 void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
1985
1986 /*
1987 * Returns vCPU-specific APICv inhibit reasons
1988 */
1989 unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
1990
1991 gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
1992 void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
1993 int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
1994 void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
1995 int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
1996 };
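/*
 * Illustrative sketch only (not part of this header): a vendor module is
 * expected to provide a filled-in kvm_x86_ops instance and hand it to the
 * common x86 code.  The "ex_" names below are hypothetical stand-ins for a
 * vendor's real callbacks and VM container type.
 */
#if 0
static struct kvm_x86_ops ex_x86_ops __initdata = {
	.name				= KBUILD_MODNAME,
	.check_processor_compatibility	= ex_check_processor_compat,
	.enable_virtualization_cpu	= ex_enable_virtualization_cpu,
	.disable_virtualization_cpu	= ex_disable_virtualization_cpu,
	.vm_size			= sizeof(struct ex_kvm),
	.vcpu_create			= ex_vcpu_create,
	.vcpu_run			= ex_vcpu_run,
	.handle_exit			= ex_handle_exit,
};
#endif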
1997
1998 struct kvm_x86_nested_ops {
1999 void (*leave_nested)(struct kvm_vcpu *vcpu);
2000 bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
2001 u32 error_code);
2002 int (*check_events)(struct kvm_vcpu *vcpu);
2003 bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
2004 void (*triple_fault)(struct kvm_vcpu *vcpu);
2005 int (*get_state)(struct kvm_vcpu *vcpu,
2006 struct kvm_nested_state __user *user_kvm_nested_state,
2007 unsigned user_data_size);
2008 int (*set_state)(struct kvm_vcpu *vcpu,
2009 struct kvm_nested_state __user *user_kvm_nested_state,
2010 struct kvm_nested_state *kvm_state);
2011 bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
2012 int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
2013
2014 int (*enable_evmcs)(struct kvm_vcpu *vcpu,
2015 uint16_t *vmcs_version);
2016 uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
2017 void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu);
2018 };
2019
2020 struct kvm_x86_init_ops {
2021 int (*hardware_setup)(void);
2022 unsigned int (*handle_intel_pt_intr)(void);
2023
2024 struct kvm_x86_ops *runtime_ops;
2025 struct kvm_pmu_ops *pmu_ops;
2026 };
2027
2028 struct kvm_arch_async_pf {
2029 u32 token;
2030 gfn_t gfn;
2031 unsigned long cr3;
2032 bool direct_map;
2033 u64 error_code;
2034 };
2035
2036 extern u32 __read_mostly kvm_nr_uret_msrs;
2037 extern bool __read_mostly allow_smaller_maxphyaddr;
2038 extern bool __read_mostly enable_apicv;
2039 extern bool __read_mostly enable_ipiv;
2040 extern bool __read_mostly enable_device_posted_irqs;
2041 extern struct kvm_x86_ops kvm_x86_ops;
2042
2043 #define kvm_x86_call(func) static_call(kvm_x86_##func)
2044 #define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)
2045
2046 #define KVM_X86_OP(func) \
2047 DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
2048 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
2049 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
2050 #include <asm/kvm-x86-ops.h>
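/*
 * Illustrative sketch only: common x86 code invokes vendor hooks through the
 * static-call wrappers declared above rather than through raw function
 * pointers, e.g. kvm_x86_call(get_cpl)(vcpu).  The wrapper below is
 * hypothetical.
 */
#if 0
static inline int ex_current_cpl(struct kvm_vcpu *vcpu)
{
	/* Dispatches to the vendor implementation via a static call. */
	return kvm_x86_call(get_cpl)(vcpu);
}
#endif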
2051
2052 int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
2053 void kvm_x86_vendor_exit(void);
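/*
 * Illustrative sketch only: a vendor module registers itself by passing its
 * kvm_x86_init_ops to kvm_x86_vendor_init() from module init and tears down
 * with kvm_x86_vendor_exit().  All "ex_" names are hypothetical.
 */
#if 0
static struct kvm_x86_init_ops ex_init_ops __initdata = {
	.hardware_setup		= ex_hardware_setup,
	.runtime_ops		= &ex_x86_ops,
	.pmu_ops		= &ex_pmu_ops,
};

static int __init ex_module_init(void)
{
	return kvm_x86_vendor_init(&ex_init_ops);
}
#endif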
2054
2055 #define __KVM_HAVE_ARCH_VM_ALLOC
2056 static inline struct kvm *kvm_arch_alloc_vm(void)
2057 {
2058 return kvzalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT);
2059 }
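/*
 * Illustrative sketch only: kvm_arch_alloc_vm() works because each vendor
 * embeds struct kvm as the *first* member of its VM container and reports
 * the container's size via kvm_x86_ops.vm_size, so a single zeroed
 * allocation covers both common and vendor state.  "struct ex_kvm" is
 * hypothetical.
 */
#if 0
struct ex_kvm {
	struct kvm kvm;		/* must be first */
	u32 ex_vm_flags;	/* vendor-private VM state follows */
};
/* ... and in the vendor ops table: .vm_size = sizeof(struct ex_kvm), */
#endif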
2060
2061 #define __KVM_HAVE_ARCH_VM_FREE
2062 void kvm_arch_free_vm(struct kvm *kvm);
2063
2064 #if IS_ENABLED(CONFIG_HYPERV)
2065 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
2066 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
2067 {
2068 if (kvm_x86_ops.flush_remote_tlbs &&
2069 !kvm_x86_call(flush_remote_tlbs)(kvm))
2070 return 0;
2071 else
2072 return -ENOTSUPP;
2073 }
2074
2075 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
2076 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
2077 u64 nr_pages)
2078 {
2079 if (!kvm_x86_ops.flush_remote_tlbs_range)
2080 return -EOPNOTSUPP;
2081
2082 return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages);
2083 }
2084 #endif /* CONFIG_HYPERV */
2085
2086 enum kvm_intr_type {
2087 /* Values are arbitrary, but must be non-zero. */
2088 KVM_HANDLING_IRQ = 1,
2089 KVM_HANDLING_NMI,
2090 };
2091
2092 /* Enable perf NMI and timer modes to work, and minimise false positives. */
2093 #define kvm_arch_pmi_in_guest(vcpu) \
2094 ((vcpu) && (vcpu)->arch.handling_intr_from_guest && \
2095 (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI)))
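/*
 * Illustrative sketch only: a perf guest-state callback might use the check
 * above to attribute a PMI to the guest only when the vCPU actually took an
 * interrupt from guest context and the NMI-ness of the host handler matches.
 * The helper name is hypothetical; PERF_GUEST_ACTIVE comes from
 * linux/perf_event.h.
 */
#if 0
static unsigned int ex_perf_guest_state(struct kvm_vcpu *vcpu)
{
	return kvm_arch_pmi_in_guest(vcpu) ? PERF_GUEST_ACTIVE : 0;
}
#endif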
2096
2097 void __init kvm_mmu_x86_module_init(void);
2098 int kvm_mmu_vendor_module_init(void);
2099 void kvm_mmu_vendor_module_exit(void);
2100
2101 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
2102 int kvm_mmu_create(struct kvm_vcpu *vcpu);
2103 int kvm_mmu_init_vm(struct kvm *kvm);
2104 void kvm_mmu_uninit_vm(struct kvm *kvm);
2105
2106 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
2107 struct kvm_memory_slot *slot);
2108
2109 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
2110 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
2111 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
2112 const struct kvm_memory_slot *memslot,
2113 int start_level);
2114 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
2115 const struct kvm_memory_slot *memslot,
2116 int target_level);
2117 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
2118 const struct kvm_memory_slot *memslot,
2119 u64 start, u64 end,
2120 int target_level);
2121 void kvm_mmu_recover_huge_pages(struct kvm *kvm,
2122 const struct kvm_memory_slot *memslot);
2123 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
2124 const struct kvm_memory_slot *memslot);
2125 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
2126 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
2127 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
2128
2129 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
2130
2131 extern bool tdp_enabled;
2132
2133 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
2134
2135 /*
2136 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
2137 * userspace I/O) to indicate that the emulation context
2138 * should be reused as is, i.e. skip initialization of
2139 * emulation context, instruction fetch and decode.
2140 *
2141 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
2142 * Indicates that only select instructions (tagged with
2143 * EmulateOnUD) should be emulated (to minimize the emulator
2144 * attack surface). See also EMULTYPE_TRAP_UD_FORCED.
2145 *
2146 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
2147 * decode the instruction length. For use *only* by
2148 * kvm_x86_ops.skip_emulated_instruction() implementations if
2149 * EMULTYPE_COMPLETE_USER_EXIT is not set.
2150 *
2151 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
2152 * retry native execution under certain conditions.
2153 * Can only be set in conjunction with EMULTYPE_PF.
2154 *
2155 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
2156 * triggered by KVM's magic "force emulation" prefix,
2157 * which is opt in via module param (off by default).
2158 * Bypasses EmulateOnUD restriction despite emulating
2159 * due to an intercepted #UD (see EMULTYPE_TRAP_UD).
2160 * Used to test the full emulator from userspace.
2161 *
2162 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
2163 * backdoor emulation, which is opt in via module param.
2164 * VMware backdoor emulation handles select instructions
2165 * and reinjects the #GP for all other cases.
2166 *
2167 * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case
2168 * the CR2/GPA value passed on the stack is valid.
2169 *
2170 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
2171 * state and inject single-step #DBs after skipping
2172 * an instruction (after completing userspace I/O).
2173 *
2174 * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
2175 * is attempting to write a gfn that contains one or
2176 * more of the PTEs used to translate the write itself,
2177 * and the owning page table is being shadowed by KVM.
2178 * If emulation of the faulting instruction fails and
2179 * this flag is set, KVM will exit to userspace instead
2180 * of retrying emulation as KVM cannot make forward
2181 * progress.
2182 *
2183 * If emulation fails for a write to guest page tables,
2184 * KVM unprotects (zaps) the shadow page for the target
2185 * gfn and resumes the guest to retry the non-emulatable
2186 * instruction (on hardware). Unprotecting the gfn
2187 * doesn't allow forward progress for a self-changing
2188 * access because doing so also zaps the translation for
2189 * the gfn, i.e. retrying the instruction will hit a
2190 * !PRESENT fault, which results in a new shadow page
2191 * and sends KVM back to square one.
2192 *
2193 * EMULTYPE_SKIP_SOFT_INT - Set in combination with EMULTYPE_SKIP to only skip
2194 * an instruction if it could generate a given software
2195 * interrupt, which must be encoded via
2196 * EMULTYPE_SET_SOFT_INT_VECTOR().
2197 */
2198 #define EMULTYPE_NO_DECODE (1 << 0)
2199 #define EMULTYPE_TRAP_UD (1 << 1)
2200 #define EMULTYPE_SKIP (1 << 2)
2201 #define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
2202 #define EMULTYPE_TRAP_UD_FORCED (1 << 4)
2203 #define EMULTYPE_VMWARE_GP (1 << 5)
2204 #define EMULTYPE_PF (1 << 6)
2205 #define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
2206 #define EMULTYPE_WRITE_PF_TO_SP (1 << 8)
2207 #define EMULTYPE_SKIP_SOFT_INT (1 << 9)
2208
2209 #define EMULTYPE_SET_SOFT_INT_VECTOR(v) ((u32)((v) & 0xff) << 16)
2210 #define EMULTYPE_GET_SOFT_INT_VECTOR(e) (((e) >> 16) & 0xff)
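/*
 * Illustrative sketch only: composing an emulation type that merely skips an
 * instruction which may deliver a software interrupt, and recovering the
 * vector on the consumer side.  The helper names are hypothetical.
 */
#if 0
static int ex_skip_soft_int_emul_type(u8 vector)
{
	return EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT |
	       EMULTYPE_SET_SOFT_INT_VECTOR(vector);
}

static u8 ex_soft_int_vector(int emul_type)
{
	return EMULTYPE_GET_SOFT_INT_VECTOR(emul_type);
}
#endif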
2211
2212 static inline bool kvm_can_emulate_event_vectoring(int emul_type)
2213 {
2214 return !(emul_type & EMULTYPE_PF);
2215 }
2216
2217 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
2218 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
2219 void *insn, int insn_len);
2220 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
2221 u64 *data, u8 ndata);
2222 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
2223
2224 void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
2225 void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason);
2226
2227 void kvm_enable_efer_bits(u64);
2228 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
2229 int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2230 int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
2231 int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2232 int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
2233 int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2234 int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
2235 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
2236 int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
2237 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
2238 int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
2239 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
2240 int kvm_emulate_invd(struct kvm_vcpu *vcpu);
2241 int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
2242 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
2243 int kvm_emulate_monitor(struct kvm_vcpu *vcpu);
2244
2245 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
2246 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
2247 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
2248 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
2249 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
2250 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
2251
2252 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2253 void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2254 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
2255 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
2256
2257 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
2258 int reason, bool has_error_code, u32 error_code);
2259
2260 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
2261 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
2262 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
2263 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
2264 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
2265 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
2266 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
2267 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr);
2268 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
2269 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
2270 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
2271 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
2272
2273 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2274 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2275
2276 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
2277 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
2278 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
2279
2280 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
2281 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
2282 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
2283 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
2284 bool has_error_code, u32 error_code);
2285 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
2286 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
2287 struct x86_exception *fault);
2288 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
2289 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
2290
2291 static inline int __kvm_irq_line_state(unsigned long *irq_state,
2292 int irq_source_id, int level)
2293 {
2294 /* Logical OR for level-triggered interrupts */
2295 if (level)
2296 __set_bit(irq_source_id, irq_state);
2297 else
2298 __clear_bit(irq_source_id, irq_state);
2299
2300 return !!(*irq_state);
2301 }
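/*
 * Illustrative sketch only: two sources sharing a level-triggered line.  The
 * line stays asserted until every source has deasserted it.
 */
#if 0
static int ex_shared_line_demo(void)
{
	unsigned long irq_state = 0;
	int level;

	level = __kvm_irq_line_state(&irq_state, 0, 1);	/* source 0 asserts   -> 1 */
	level = __kvm_irq_line_state(&irq_state, 1, 1);	/* source 1 asserts   -> 1 */
	level = __kvm_irq_line_state(&irq_state, 0, 0);	/* source 0 deasserts -> 1 */
	level = __kvm_irq_line_state(&irq_state, 1, 0);	/* source 1 deasserts -> 0 */
	return level;
}
#endif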
2302
2303 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
2304 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
2305
2306 void kvm_update_dr7(struct kvm_vcpu *vcpu);
2307
2308 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2309 bool always_retry);
2310
2311 static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
2312 gpa_t cr2_or_gpa)
2313 {
2314 return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
2315 }
2316
2317 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
2318 ulong roots_to_free);
2319 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
2320 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
2321 struct x86_exception *exception);
2322 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
2323 struct x86_exception *exception);
2324 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
2325 struct x86_exception *exception);
2326
2327 bool kvm_apicv_activated(struct kvm *kvm);
2328 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
2329 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
2330 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
2331 enum kvm_apicv_inhibit reason, bool set);
2332 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
2333 enum kvm_apicv_inhibit reason, bool set);
2334
2335 static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
2336 enum kvm_apicv_inhibit reason)
2337 {
2338 kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
2339 }
2340
2341 static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
2342 enum kvm_apicv_inhibit reason)
2343 {
2344 kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
2345 }
2346
2347 void kvm_inc_or_dec_irq_window_inhibit(struct kvm *kvm, bool inc);
2348
2349 static inline void kvm_inc_apicv_irq_window_req(struct kvm *kvm)
2350 {
2351 kvm_inc_or_dec_irq_window_inhibit(kvm, true);
2352 }
2353
2354 static inline void kvm_dec_apicv_irq_window_req(struct kvm *kvm)
2355 {
2356 kvm_inc_or_dec_irq_window_inhibit(kvm, false);
2357 }
2358
2359 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
2360 void *insn, int insn_len);
2361 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
2362 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
2363 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
2364 u64 addr, unsigned long roots);
2365 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
2366 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
2367
2368 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
2369 int tdp_max_root_level, int tdp_huge_page_level);
2370
2371
2372 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
2373 #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
2374 #endif
2375
2376 #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
2377
2378 static inline u16 kvm_read_ldt(void)
2379 {
2380 u16 ldt;
2381 asm("sldt %0" : "=g"(ldt));
2382 return ldt;
2383 }
2384
2385 static inline void kvm_load_ldt(u16 sel)
2386 {
2387 asm("lldt %0" : : "rm"(sel));
2388 }
2389
2390 #ifdef CONFIG_X86_64
2391 static inline unsigned long read_msr(unsigned long msr)
2392 {
2393 u64 value;
2394
2395 rdmsrq(msr, value);
2396 return value;
2397 }
2398 #endif
2399
2400 static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
2401 {
2402 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2403 }
2404
2405 #define TSS_IOPB_BASE_OFFSET 0x66
2406 #define TSS_BASE_SIZE 0x68
2407 #define TSS_IOPB_SIZE (65536 / 8)
2408 #define TSS_REDIRECTION_SIZE (256 / 8)
2409 #define RMODE_TSS_SIZE \
2410 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
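/*
 * Size check: 0x68 (104) bytes of base TSS + 32 bytes of interrupt
 * redirection bitmap + 8192 bytes of I/O permission bitmap + 1 terminator
 * byte = 8329 bytes for the real-mode TSS.
 */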
2411
2412 enum {
2413 TASK_SWITCH_CALL = 0,
2414 TASK_SWITCH_IRET = 1,
2415 TASK_SWITCH_JMP = 2,
2416 TASK_SWITCH_GATE = 3,
2417 };
2418
2419 #define HF_GUEST_MASK (1 << 0) /* VCPU is in guest-mode */
2420
2421 #ifdef CONFIG_KVM_SMM
2422 #define HF_SMM_MASK (1 << 1)
2423 #define HF_SMM_INSIDE_NMI_MASK (1 << 2)
2424
2425 # define KVM_MAX_NR_ADDRESS_SPACES 2
2426 /* SMM is currently unsupported for guests with private memory. */
2427 # define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
2428 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
2429 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
2430 #else
2431 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
2432 #endif
2433
2434 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
2435 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
2436 int kvm_cpu_has_extint(struct kvm_vcpu *v);
2437 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
2438 int kvm_cpu_get_extint(struct kvm_vcpu *v);
2439 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
2440 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
2441
2442 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
2443 unsigned long ipi_bitmap_high, u32 min,
2444 unsigned long icr, int op_64_bit);
2445
2446 int kvm_add_user_return_msr(u32 msr);
2447 int kvm_find_user_return_msr(u32 msr);
2448 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
2449 u64 kvm_get_user_return_msr(unsigned int slot);
2450
2451 static inline bool kvm_is_supported_user_return_msr(u32 msr)
2452 {
2453 return kvm_find_user_return_msr(msr) >= 0;
2454 }
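/*
 * Illustrative sketch only (the MSR choice and names are hypothetical): a
 * vendor module registers a user-return MSR once at setup and then updates
 * the guest value per write; the common user-return machinery restores the
 * host value before returning to userspace.
 */
#if 0
static int ex_uret_slot;

static void ex_setup_user_return_msrs(void)
{
	ex_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
}

static int ex_set_guest_tsc_aux(u64 guest_val)
{
	return kvm_set_user_return_msr(ex_uret_slot, guest_val, -1ull);
}
#endif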
2455
2456 u64 kvm_scale_tsc(u64 tsc, u64 ratio);
2457 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
2458 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
2459 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
2460
2461 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
2462 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
2463
2464 void kvm_make_scan_ioapic_request(struct kvm *kvm);
2465 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
2466 unsigned long *vcpu_bitmap);
2467
2468 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2469 struct kvm_async_pf *work);
2470 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2471 struct kvm_async_pf *work);
2472 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2473 struct kvm_async_pf *work);
2474 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
2475 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
2476 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
2477
2478 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
2479 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
2480
2481 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
2482 u32 size);
2483 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
2484 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
2485
2486 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
2487 {
2488 /* We can only post Fixed and LowPrio IRQs */
2489 return (irq->delivery_mode == APIC_DM_FIXED ||
2490 irq->delivery_mode == APIC_DM_LOWEST);
2491 }
2492
2493 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
2494 {
2495 kvm_x86_call(vcpu_blocking)(vcpu);
2496 }
2497
2498 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
2499 {
2500 kvm_x86_call(vcpu_unblocking)(vcpu);
2501 }
2502
2503 static inline int kvm_cpu_get_apicid(int mps_cpu)
2504 {
2505 #ifdef CONFIG_X86_LOCAL_APIC
2506 return default_cpu_present_to_apicid(mps_cpu);
2507 #else
2508 WARN_ON_ONCE(1);
2509 return BAD_APICID;
2510 #endif
2511 }
2512
2513 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
2514
2515 #define KVM_CLOCK_VALID_FLAGS \
2516 (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)
2517
2518 #define KVM_X86_VALID_QUIRKS \
2519 (KVM_X86_QUIRK_LINT0_REENABLED | \
2520 KVM_X86_QUIRK_CD_NW_CLEARED | \
2521 KVM_X86_QUIRK_LAPIC_MMIO_HOLE | \
2522 KVM_X86_QUIRK_OUT_7E_INC_RIP | \
2523 KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \
2524 KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
2525 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \
2526 KVM_X86_QUIRK_SLOT_ZAP_ALL | \
2527 KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \
2528 KVM_X86_QUIRK_IGNORE_GUEST_PAT | \
2529 KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM)
2530
2531 #define KVM_X86_CONDITIONAL_QUIRKS \
2532 (KVM_X86_QUIRK_CD_NW_CLEARED | \
2533 KVM_X86_QUIRK_IGNORE_GUEST_PAT)
2534
2535 /*
2536 * KVM previously used a u32 field in kvm_run to indicate the hypercall was
2537 * initiated from long mode. KVM now uses only bit 0 to indicate long mode; the
2538 * remaining 31 bits of that field must be zero to preserve the ABI.
2539 */
2540 #define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1)
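/*
 * Illustrative sketch only: reject a hypercall completion whose flags set
 * any of the must-be-zero bits.  The helper name is hypothetical.
 */
#if 0
static bool ex_hypercall_flags_valid(u64 flags)
{
	return !(flags & KVM_EXIT_HYPERCALL_MBZ);
}
#endif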
2541
2542 static inline bool kvm_arch_has_irq_bypass(void)
2543 {
2544 return enable_device_posted_irqs;
2545 }
2546
2547 #endif /* _ASM_X86_KVM_HOST_H */
2548