1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This header defines architecture specific interfaces, x86 version
6 */
7
8 #ifndef _ASM_X86_KVM_HOST_H
9 #define _ASM_X86_KVM_HOST_H
10
11 #include <linux/types.h>
12 #include <linux/mm.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/tracepoint.h>
15 #include <linux/cpumask.h>
16 #include <linux/irq_work.h>
17 #include <linux/irq.h>
18 #include <linux/workqueue.h>
19
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
22 #include <linux/kvm_types.h>
23 #include <linux/perf_event.h>
24 #include <linux/pvclock_gtod.h>
25 #include <linux/clocksource.h>
26 #include <linux/irqbypass.h>
27 #include <linux/kfifo.h>
28 #include <linux/sched/vhost_task.h>
29 #include <linux/call_once.h>
30 #include <linux/atomic.h>
31
32 #include <asm/apic.h>
33 #include <asm/pvclock-abi.h>
34 #include <asm/debugreg.h>
35 #include <asm/desc.h>
36 #include <asm/mtrr.h>
37 #include <asm/msr-index.h>
38 #include <asm/msr.h>
39 #include <asm/asm.h>
40 #include <asm/irq_remapping.h>
41 #include <asm/kvm_page_track.h>
42 #include <asm/kvm_vcpu_regs.h>
43 #include <asm/reboot.h>
44 #include <hyperv/hvhdk.h>
45
46 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
47
48 /*
49 * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n; provide a dummy max if
50 * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
51 */
52 #ifdef CONFIG_KVM_MAX_NR_VCPUS
53 #define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS
54 #else
55 #define KVM_MAX_VCPUS 1024
56 #endif
57
58 /*
59 * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
60 * might be larger than the actual number of VCPUs because the
61 * APIC ID encodes CPU topology information.
62 *
63 * In the worst case, we'll need less than one extra bit for the
64 * Core ID, and less than one extra bit for the Package (Die) ID,
65 * so a ratio of 4 should be enough.
66 */
67 #define KVM_VCPU_ID_RATIO 4
68 #define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
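/*
 * Worked example of the ratio above: with the default KVM_MAX_VCPUS of 1024,
 * KVM_MAX_VCPU_IDS is 4096, i.e. APIC IDs 0..4095 are representable, leaving
 * roughly two spare bits for the Core and Package (Die) topology encoding on
 * top of the raw vCPU count.
 */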
69
70 /* memory slots that are not exposed to userspace */
71 #define KVM_INTERNAL_MEM_SLOTS 3
72
73 #define KVM_HALT_POLL_NS_DEFAULT 200000
74
75 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
76
77 #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
78 KVM_DIRTY_LOG_INITIALLY_SET)
79
80 #define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
81 KVM_BUS_LOCK_DETECTION_EXIT)
82
83 #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
84 KVM_X86_NOTIFY_VMEXIT_USER)
85
86 /* x86-specific vcpu->requests bit members */
87 #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
88 #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
89 #define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
90 #define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
91 #define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
92 #define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
93 #define KVM_REQ_EVENT KVM_ARCH_REQ(6)
94 #define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
95 #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
96 #define KVM_REQ_NMI KVM_ARCH_REQ(9)
97 #define KVM_REQ_PMU KVM_ARCH_REQ(10)
98 #define KVM_REQ_PMI KVM_ARCH_REQ(11)
99 #ifdef CONFIG_KVM_SMM
100 #define KVM_REQ_SMI KVM_ARCH_REQ(12)
101 #endif
102 #define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13)
103 #define KVM_REQ_MCLOCK_INPROGRESS \
104 KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
105 #define KVM_REQ_SCAN_IOAPIC \
106 KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
107 #define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
108 #define KVM_REQ_APIC_PAGE_RELOAD \
109 KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
110 #define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
111 #define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19)
112 #define KVM_REQ_HV_RESET KVM_ARCH_REQ(20)
113 #define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
114 #define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
115 #define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
116 #define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
117 #define KVM_REQ_APICV_UPDATE \
118 KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
119 #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
120 #define KVM_REQ_TLB_FLUSH_GUEST \
121 KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
122 #define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
123 #define KVM_REQ_RECALC_INTERCEPTS KVM_ARCH_REQ(29)
124 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
125 KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
126 #define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
127 KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
128 #define KVM_REQ_HV_TLB_FLUSH \
129 KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
130 #define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE \
131 KVM_ARCH_REQ_FLAGS(34, KVM_REQUEST_WAIT)
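/*
 * Rough sketch of how these requests are consumed (the generic machinery
 * lives in include/linux/kvm_host.h): KVM_ARCH_REQ(n) is bit
 * KVM_REQUEST_ARCH_BASE + n in vcpu->requests, set with kvm_make_request()
 * and consumed with kvm_check_request(), e.g.:
 *
 *	kvm_make_request(KVM_REQ_NMI, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_NMI, vcpu))
 *		<process the pending NMI before entering the guest>;
 */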
132
133 #define CR0_RESERVED_BITS \
134 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
135 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
136 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
137
138 #define CR4_RESERVED_BITS \
139 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
140 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
141 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
142 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
143 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
144 | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
145 | X86_CR4_LAM_SUP | X86_CR4_CET))
146
147 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
148
149
150
151 #define INVALID_PAGE (~(hpa_t)0)
152 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
153
154 /* KVM Hugepage definitions for x86 */
155 #define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
156 #define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
157 #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
158 #define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
159 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
160 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
161 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
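/*
 * Worked example for a 2MiB page (PG_LEVEL_2M == 2):
 *   KVM_HPAGE_GFN_SHIFT(2) == 9, KVM_HPAGE_SHIFT(2) == PAGE_SHIFT + 9 == 21,
 *   KVM_HPAGE_SIZE(2) == 2MiB and KVM_PAGES_PER_HPAGE(2) == 512.
 */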
162
163 #define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
164 #define KVM_MIN_ALLOC_MMU_PAGES 64UL
165 #define KVM_MMU_HASH_SHIFT 12
166 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
167 #define KVM_MIN_FREE_MMU_PAGES 5
168 #define KVM_REFILL_PAGES 25
169 #define KVM_MAX_CPUID_ENTRIES 256
170 #define KVM_NR_VAR_MTRR 8
171
172 #define ASYNC_PF_PER_VCPU 64
173
174 enum kvm_reg {
175 VCPU_REGS_RAX = __VCPU_REGS_RAX,
176 VCPU_REGS_RCX = __VCPU_REGS_RCX,
177 VCPU_REGS_RDX = __VCPU_REGS_RDX,
178 VCPU_REGS_RBX = __VCPU_REGS_RBX,
179 VCPU_REGS_RSP = __VCPU_REGS_RSP,
180 VCPU_REGS_RBP = __VCPU_REGS_RBP,
181 VCPU_REGS_RSI = __VCPU_REGS_RSI,
182 VCPU_REGS_RDI = __VCPU_REGS_RDI,
183 #ifdef CONFIG_X86_64
184 VCPU_REGS_R8 = __VCPU_REGS_R8,
185 VCPU_REGS_R9 = __VCPU_REGS_R9,
186 VCPU_REGS_R10 = __VCPU_REGS_R10,
187 VCPU_REGS_R11 = __VCPU_REGS_R11,
188 VCPU_REGS_R12 = __VCPU_REGS_R12,
189 VCPU_REGS_R13 = __VCPU_REGS_R13,
190 VCPU_REGS_R14 = __VCPU_REGS_R14,
191 VCPU_REGS_R15 = __VCPU_REGS_R15,
192 #endif
193 VCPU_REGS_RIP,
194 NR_VCPU_REGS,
195
196 VCPU_EXREG_PDPTR = NR_VCPU_REGS,
197 VCPU_EXREG_CR0,
198 /*
199 * Alias AMD's ERAPS (not a real register) to CR3 so that common code
200 * can trigger emulation of the RAP (Return Address Predictor) with
201 * minimal support required in common code. Piggyback CR3 as the RAP
202 * is cleared on writes to CR3, i.e. marking CR3 dirty will naturally
203 * mark ERAPS dirty as well.
204 */
205 VCPU_EXREG_CR3,
206 VCPU_EXREG_ERAPS = VCPU_EXREG_CR3,
207 VCPU_EXREG_CR4,
208 VCPU_EXREG_RFLAGS,
209 VCPU_EXREG_SEGMENTS,
210 VCPU_EXREG_EXIT_INFO_1,
211 VCPU_EXREG_EXIT_INFO_2,
212 };
213
214 enum {
215 VCPU_SREG_ES,
216 VCPU_SREG_CS,
217 VCPU_SREG_SS,
218 VCPU_SREG_DS,
219 VCPU_SREG_FS,
220 VCPU_SREG_GS,
221 VCPU_SREG_TR,
222 VCPU_SREG_LDTR,
223 };
224
225 enum exit_fastpath_completion {
226 EXIT_FASTPATH_NONE,
227 EXIT_FASTPATH_REENTER_GUEST,
228 EXIT_FASTPATH_EXIT_HANDLED,
229 EXIT_FASTPATH_EXIT_USERSPACE,
230 };
231 typedef enum exit_fastpath_completion fastpath_t;
232
233 struct x86_emulate_ctxt;
234 struct x86_exception;
235 union kvm_smram;
236 enum x86_intercept;
237 enum x86_intercept_stage;
238
239 #define KVM_NR_DB_REGS 4
240
241 #define DR6_BUS_LOCK (1 << 11)
242 #define DR6_BD (1 << 13)
243 #define DR6_BS (1 << 14)
244 #define DR6_BT (1 << 15)
245 #define DR6_RTM (1 << 16)
246 /*
247 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
248 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
249 * they will never be 0 for now, but if they are given a meaning
250 * in the future, no code change will be required.
251 *
252 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
253 */
254 #define DR6_ACTIVE_LOW 0xffff0ff0
255 #define DR6_VOLATILE 0x0001e80f
256 #define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
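/*
 * E.g. with the values above, DR6_FIXED_1 works out to 0xfffe07f0, i.e. the
 * active-low bits minus the volatile BUS_LOCK and RTM bits.
 */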
257
258 #define DR7_BP_EN_MASK 0x000000ff
259 #define DR7_GE (1 << 9)
260 #define DR7_GD (1 << 13)
261 #define DR7_VOLATILE 0xffff2bff
262
263 #define KVM_GUESTDBG_VALID_MASK \
264 (KVM_GUESTDBG_ENABLE | \
265 KVM_GUESTDBG_SINGLESTEP | \
266 KVM_GUESTDBG_USE_HW_BP | \
267 KVM_GUESTDBG_USE_SW_BP | \
268 KVM_GUESTDBG_INJECT_BP | \
269 KVM_GUESTDBG_INJECT_DB | \
270 KVM_GUESTDBG_BLOCKIRQ)
271
272 #define PFERR_PRESENT_MASK BIT(0)
273 #define PFERR_WRITE_MASK BIT(1)
274 #define PFERR_USER_MASK BIT(2)
275 #define PFERR_RSVD_MASK BIT(3)
276 #define PFERR_FETCH_MASK BIT(4)
277 #define PFERR_PK_MASK BIT(5)
278 #define PFERR_SS_MASK BIT(6)
279 #define PFERR_SGX_MASK BIT(15)
280 #define PFERR_GUEST_RMP_MASK BIT_ULL(31)
281 #define PFERR_GUEST_FINAL_MASK BIT_ULL(32)
282 #define PFERR_GUEST_PAGE_MASK BIT_ULL(33)
283 #define PFERR_GUEST_ENC_MASK BIT_ULL(34)
284 #define PFERR_GUEST_SIZEM_MASK BIT_ULL(35)
285 #define PFERR_GUEST_VMPL_MASK BIT_ULL(36)
286
287 /*
288 * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
289 * when emulating instructions that trigger implicit accesses.
290 */
291 #define PFERR_IMPLICIT_ACCESS BIT_ULL(48)
292 /*
293 * PRIVATE_ACCESS is a KVM-defined flag used to indicate that a fault occurred
294 * when the guest was accessing private memory.
295 */
296 #define PFERR_PRIVATE_ACCESS BIT_ULL(49)
297 #define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
298
299 /* apic attention bits */
300 #define KVM_APIC_CHECK_VAPIC 0
301 /*
302 * The following bit is set with PV-EOI, unset on EOI.
303 * We detect PV-EOI changes made by the guest by comparing
304 * this bit with the PV-EOI value in guest memory.
305 * See the implementation in apic_update_pv_eoi.
306 */
307 #define KVM_APIC_PV_EOI_PENDING 1
308
309 struct kvm_kernel_irqfd;
310 struct kvm_kernel_irq_routing_entry;
311
312 /*
313 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
314 * also includes TDP pages) to determine whether or not a page can be used in
315 * the given MMU context. This is a subset of the overall kvm_cpu_role to
316 * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
317 * allocating 2 bytes per gfn instead of 4 bytes per gfn.
318 *
319 * Upper-level shadow pages having gptes are tracked for write-protection via
320 * gfn_write_track. As above, gfn_write_track is a 16-bit counter, so KVM must
321 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
322 * otherwise gfn_write_track will overflow and explosions will ensue.
323 *
324 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
325 * cannot be reused. The ability to reuse a SP is tracked by its role, which
326 * incorporates various mode bits and properties of the SP. Roughly speaking,
327 * the number of unique SPs that can theoretically be created is 2^n, where n
328 * is the number of bits that are used to compute the role.
329 *
330 * But, even though there are 20 bits in the mask below, not all combinations
331 * of modes and flags are possible:
332 *
333 * - invalid shadow pages are not accounted, mirror pages are not shadowed,
334 * so the bits are effectively 18.
335 *
336 * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
337 * execonly and ad_disabled are only used for nested EPT which has
338 * has_4_byte_gpte=0. Therefore, 2 bits are always unused.
339 *
340 * - the 4 bits of level are effectively limited to the values 2/3/4/5,
341 * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
342 * paging has exactly one upper level, making level completely redundant
343 * when has_4_byte_gpte=1.
344 *
345 * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
346 * cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
347 *
348 * Therefore, the maximum number of possible upper-level shadow pages for a
349 * single gfn is a bit less than 2^13.
350 */
351 union kvm_mmu_page_role {
352 u32 word;
353 struct {
354 unsigned level:4;
355 unsigned has_4_byte_gpte:1;
356 unsigned quadrant:2;
357 unsigned direct:1;
358 unsigned access:3;
359 unsigned invalid:1;
360 unsigned efer_nx:1;
361 unsigned cr0_wp:1;
362 unsigned smep_andnot_wp:1;
363 unsigned smap_andnot_wp:1;
364 unsigned ad_disabled:1;
365 unsigned guest_mode:1;
366 unsigned passthrough:1;
367 unsigned is_mirror:1;
368 unsigned :4;
369
370 /*
371 * This is left at the top of the word so that
372 * kvm_memslots_for_spte_role can extract it with a
373 * simple shift. While there is room, give it a whole
374 * byte so it is also faster to load it from memory.
375 */
376 unsigned smm:8;
377 };
378 };
379
380 /*
381 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
382 * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
383 * including on nested transitions, if nothing in the full role changes then
384 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
385 * don't treat all-zero structure as valid data.
386 *
387 * The properties that are tracked in the extended role but not the page role
388 * are for things that either (a) do not affect the validity of the shadow page
389 * or (b) are indirectly reflected in the shadow page's role. For example,
390 * CR4.PKE only affects permission checks for software walks of the guest page
391 * tables (because KVM doesn't support Protection Keys with shadow paging), and
392 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
393 *
394 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
395 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
396 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
397 * SMAP aware regardless of CR0.WP.
398 */
399 union kvm_mmu_extended_role {
400 u32 word;
401 struct {
402 unsigned int valid:1;
403 unsigned int execonly:1;
404 unsigned int cr4_pse:1;
405 unsigned int cr4_pke:1;
406 unsigned int cr4_smap:1;
407 unsigned int cr4_smep:1;
408 unsigned int cr4_la57:1;
409 unsigned int efer_lma:1;
410 };
411 };
412
413 union kvm_cpu_role {
414 u64 as_u64;
415 struct {
416 union kvm_mmu_page_role base;
417 union kvm_mmu_extended_role ext;
418 };
419 };
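/*
 * The packed layout is what makes the "did anything change?" check described
 * above cheap; KVM's MMU (re)configuration paths can roughly do:
 *
 *	if (cpu_role.as_u64 == mmu->cpu_role.as_u64 &&
 *	    root_role.word == mmu->root_role.word)
 *		return;		// nothing to reconfigure
 */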
420
421 struct kvm_rmap_head {
422 atomic_long_t val;
423 };
424
425 struct kvm_pio_request {
426 unsigned long count;
427 int in;
428 int port;
429 int size;
430 };
431
432 #define PT64_ROOT_MAX_LEVEL 5
433
434 struct rsvd_bits_validate {
435 u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
436 u64 bad_mt_xwr;
437 };
438
439 struct kvm_mmu_root_info {
440 gpa_t pgd;
441 hpa_t hpa;
442 };
443
444 #define KVM_MMU_ROOT_INFO_INVALID \
445 ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
446
447 #define KVM_MMU_NUM_PREV_ROOTS 3
448
449 #define KVM_MMU_ROOT_CURRENT BIT(0)
450 #define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i)
451 #define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
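/*
 * With KVM_MMU_NUM_PREV_ROOTS == 3, KVM_MMU_ROOTS_ALL is BIT(4) - 1 == 0xf,
 * i.e. KVM_MMU_ROOT_CURRENT plus KVM_MMU_ROOT_PREVIOUS(0..2).
 */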
452
453 #define KVM_HAVE_MMU_RWLOCK
454
455 struct kvm_mmu_page;
456 struct kvm_page_fault;
457
458 /*
459 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
460 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
461 * current mmu mode.
462 */
463 struct kvm_mmu {
464 unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
465 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
466 int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
467 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
468 struct x86_exception *fault);
469 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
470 gpa_t gva_or_gpa, u64 access,
471 struct x86_exception *exception);
472 int (*sync_spte)(struct kvm_vcpu *vcpu,
473 struct kvm_mmu_page *sp, int i);
474 struct kvm_mmu_root_info root;
475 hpa_t mirror_root_hpa;
476 union kvm_cpu_role cpu_role;
477 union kvm_mmu_page_role root_role;
478
479 /*
480 * The pkru_mask indicates if protection key checks are needed. It
481 * consists of 16 domains indexed by page fault error code bits [4:1],
482 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
483 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
484 */
485 u32 pkru_mask;
486
487 struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
488
489 /*
490 * Bitmap; bit set = permission fault
491 * Byte index: page fault error code [4:1]
492 * Bit index: pte permissions in ACC_* format
493 */
494 u8 permissions[16];
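	/*
	 * Roughly, a permission check then reduces to a single bit test,
	 * (permissions[pfec >> 1] >> pte_access) & 1, with pfec taken from
	 * the page fault error code bits [4:1] as described above.
	 */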
495
496 u64 *pae_root;
497 u64 *pml4_root;
498 u64 *pml5_root;
499
500 /*
501	 * Checks for zero bits in shadow page table entries; these
502	 * bits include not only hardware-reserved bits but also
503	 * bits that the SPTE never uses.
504 */
505 struct rsvd_bits_validate shadow_zero_check;
506
507 struct rsvd_bits_validate guest_rsvd_check;
508
509 u64 pdptrs[4]; /* pae */
510 };
511
512 enum pmc_type {
513 KVM_PMC_GP = 0,
514 KVM_PMC_FIXED,
515 };
516
517 struct kvm_pmc {
518 enum pmc_type type;
519 u8 idx;
520 bool is_paused;
521 bool intr;
522 /*
523 * Base value of the PMC counter, relative to the *consumed* count in
524 * the associated perf_event. This value includes counter updates from
525 * the perf_event and emulated_count since the last time the counter
526 * was reprogrammed, but it is *not* the current value as seen by the
527 * guest or userspace.
528 *
529 * The count is relative to the associated perf_event so that KVM
530 * doesn't need to reprogram the perf_event every time the guest writes
531 * to the counter.
532 */
533 u64 counter;
534 /*
535 * PMC events triggered by KVM emulation that haven't been fully
536 * processed, i.e. haven't undergone overflow detection.
537 */
538 u64 emulated_counter;
539 u64 eventsel;
540 u64 eventsel_hw;
541 struct perf_event *perf_event;
542 struct kvm_vcpu *vcpu;
543 /*
544	 * Only used when creating or reusing a perf_event: the eventsel
545	 * value for general purpose counters, the ctrl value for
546	 * fixed counters.
547 */
548 u64 current_config;
549 };
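/*
 * Putting the fields above together, the counter value the guest observes is
 * roughly:
 *
 *	counter + emulated_counter + <delta accumulated by the perf_event>
 *
 * truncated to the width of the counter (see pmc_read_counter()).
 */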
550
551 /* More counters may conflict with other existing Architectural MSRs */
552 #define KVM_MAX(a, b) ((a) >= (b) ? (a) : (b))
553 #define KVM_MAX_NR_INTEL_GP_COUNTERS 8
554 #define KVM_MAX_NR_AMD_GP_COUNTERS 6
555 #define KVM_MAX_NR_GP_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \
556 KVM_MAX_NR_AMD_GP_COUNTERS)
557
558 #define KVM_MAX_NR_INTEL_FIXED_COUNTERS 3
559 #define KVM_MAX_NR_AMD_FIXED_COUNTERS 0
560 #define KVM_MAX_NR_FIXED_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUNTERS, \
561 KVM_MAX_NR_AMD_FIXED_COUNTERS)
562
563 struct kvm_pmu {
564 u8 version;
565 unsigned nr_arch_gp_counters;
566 unsigned nr_arch_fixed_counters;
567 unsigned available_event_types;
568 u64 fixed_ctr_ctrl;
569 u64 fixed_ctr_ctrl_hw;
570 u64 fixed_ctr_ctrl_rsvd;
571 u64 global_ctrl;
572 u64 global_status;
573 u64 counter_bitmask[2];
574 u64 global_ctrl_rsvd;
575 u64 global_status_rsvd;
576 u64 reserved_bits;
577 u64 raw_event_mask;
578 struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS];
579 struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUNTERS];
580
581 /*
582 * Overlay the bitmap with a 64-bit atomic so that all bits can be
583 * set in a single access, e.g. to reprogram all counters when the PMU
584 * filter changes.
585 */
586 union {
587 DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
588 atomic64_t __reprogram_pmi;
589 };
590 DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
591 DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
592
593 DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX);
594 DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX);
595
596 u64 ds_area;
597 u64 pebs_enable;
598 u64 pebs_enable_rsvd;
599 u64 pebs_data_cfg;
600 u64 pebs_data_cfg_rsvd;
601
602 /*
603	 * If a guest counter is cross-mapped to a host counter with a different
604	 * index, its PEBS capability will be temporarily disabled.
605	 *
606	 * The user should make sure that this mask is updated
607	 * after disabling interrupts and before perf_guest_get_msrs().
608 */
609 u64 host_cross_mapped_mask;
610
611 /*
612	 * Gates releasing perf_events that are not marked in pmc_in_use
613	 * so that it happens at most once per vCPU time slice.
614 */
615 bool need_cleanup;
616
617 /*
618	 * The total number of programmed perf_events; it helps to avoid a
619	 * redundant check before cleanup if the guest doesn't use the vPMU at all.
620 */
621 u8 event_count;
622 };
623
624 struct kvm_pmu_ops;
625
626 enum {
627 KVM_DEBUGREG_BP_ENABLED = BIT(0),
628 KVM_DEBUGREG_WONT_EXIT = BIT(1),
629 /*
630 * Guest debug registers (DR0-3, DR6 and DR7) are saved/restored by
631	 * hardware on exit from or entry to the guest. KVM needn't switch them.
632	 * Because DR0-3, DR6 and DR7 are set to their architectural INIT value on
633	 * VM-Exit, the host values need to be restored.
634 */
635 KVM_DEBUGREG_AUTO_SWITCH = BIT(2),
636 };
637
638 struct kvm_mtrr {
639 u64 var[KVM_NR_VAR_MTRR * 2];
640 u64 fixed_64k;
641 u64 fixed_16k[2];
642 u64 fixed_4k[8];
643 u64 deftype;
644 };
645
646 /* Hyper-V SynIC timer */
647 struct kvm_vcpu_hv_stimer {
648 struct hrtimer timer;
649 int index;
650 union hv_stimer_config config;
651 u64 count;
652 u64 exp_time;
653 struct hv_message msg;
654 bool msg_pending;
655 };
656
657 /* Hyper-V synthetic interrupt controller (SynIC) */
658 struct kvm_vcpu_hv_synic {
659 u64 version;
660 u64 control;
661 u64 msg_page;
662 u64 evt_page;
663 atomic64_t sint[HV_SYNIC_SINT_COUNT];
664 atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
665 DECLARE_BITMAP(auto_eoi_bitmap, 256);
666 DECLARE_BITMAP(vec_bitmap, 256);
667 bool active;
668 bool dont_zero_synic_pages;
669 };
670
671 /* The maximum number of entries on the TLB flush fifo. */
672 #define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
673 /*
674 * Note: the following 'magic' entry is made up by KVM to avoid putting
675 * anything besides GVA on the TLB flush fifo. It is theoretically possible
676 * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
677 * which will look identical. KVM's action to 'flush everything' instead of
678 * flushing these particular addresses is, however, fully legitimate as
679 * flushing more than requested is always OK.
680 */
681 #define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1)
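/*
 * A minimal sketch of the entry encoding this relies on: each fifo entry is a
 * page-aligned GVA with an "additional pages" count packed into the low 12
 * bits, which is why GVA 0xfffffffffffff000 plus 0xfff extra pages is
 * indistinguishable from the (u64)-1 magic value above.
 */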
682
683 enum hv_tlb_flush_fifos {
684 HV_L1_TLB_FLUSH_FIFO,
685 HV_L2_TLB_FLUSH_FIFO,
686 HV_NR_TLB_FLUSH_FIFOS,
687 };
688
689 struct kvm_vcpu_hv_tlb_flush_fifo {
690 spinlock_t write_lock;
691 DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
692 };
693
694 /* Hyper-V per vcpu emulation context */
695 struct kvm_vcpu_hv {
696 struct kvm_vcpu *vcpu;
697 u32 vp_index;
698 u64 hv_vapic;
699 s64 runtime_offset;
700 struct kvm_vcpu_hv_synic synic;
701 struct kvm_hyperv_exit exit;
702 struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
703 DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
704 bool enforce_cpuid;
705 struct {
706 u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
707 u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
708 u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
709 u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
710 u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
711 u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
712 u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
713 u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
714 } cpuid_cache;
715
716 struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];
717
718 /*
719 * Preallocated buffers for handling hypercalls that pass sparse vCPU
720 * sets (for high vCPU counts, they're too large to comfortably fit on
721 * the stack).
722 */
723 u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
724 DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
725
726 struct hv_vp_assist_page vp_assist_page;
727
728 struct {
729 u64 pa_page_gpa;
730 u64 vm_id;
731 u32 vp_id;
732 } nested;
733 };
734
735 struct kvm_hypervisor_cpuid {
736 u32 base;
737 u32 limit;
738 };
739
740 #ifdef CONFIG_KVM_XEN
741 /* Xen HVM per vcpu emulation context */
742 struct kvm_vcpu_xen {
743 u64 hypercall_rip;
744 u32 current_runstate;
745 u8 upcall_vector;
746 struct gfn_to_pfn_cache vcpu_info_cache;
747 struct gfn_to_pfn_cache vcpu_time_info_cache;
748 struct gfn_to_pfn_cache runstate_cache;
749 struct gfn_to_pfn_cache runstate2_cache;
750 u64 last_steal;
751 u64 runstate_entry_time;
752 u64 runstate_times[4];
753 unsigned long evtchn_pending_sel;
754 u32 vcpu_id; /* The Xen / ACPI vCPU ID */
755 u32 timer_virq;
756 u64 timer_expires; /* In guest epoch */
757 atomic_t timer_pending;
758 struct hrtimer timer;
759 int poll_evtchn;
760 struct timer_list poll_timer;
761 struct kvm_hypervisor_cpuid cpuid;
762 };
763 #endif
764
765 struct kvm_queued_exception {
766 bool pending;
767 bool injected;
768 bool has_error_code;
769 u8 vector;
770 u32 error_code;
771 unsigned long payload;
772 bool has_payload;
773 };
774
775 /*
776 * Hardware-defined CPUID leafs that are either scattered by the kernel or are
777 * unknown to the kernel, but need to be directly used by KVM. Note, these
778 * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
779 */
780 enum kvm_only_cpuid_leafs {
781 CPUID_12_EAX = NCAPINTS,
782 CPUID_7_1_EDX,
783 CPUID_8000_0007_EDX,
784 CPUID_8000_0022_EAX,
785 CPUID_7_2_EDX,
786 CPUID_24_0_EBX,
787 CPUID_8000_0021_ECX,
788 CPUID_7_1_ECX,
789 CPUID_1E_1_EAX,
790 CPUID_24_1_ECX,
791 NR_KVM_CPU_CAPS,
792
793 NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
794 };
795
796 struct kvm_vcpu_arch {
797 /*
798 * rip and regs accesses must go through
799 * kvm_{register,rip}_{read,write} functions.
800 */
801 unsigned long regs[NR_VCPU_REGS];
802 u32 regs_avail;
803 u32 regs_dirty;
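	/*
	 * Rough sketch of the caching scheme: kvm_register_read() checks
	 * regs_avail and, if the bit is clear, asks the vendor module (via
	 * kvm_x86_ops.cache_reg()) to refresh regs[] from hardware;
	 * kvm_register_write() additionally sets the bit in regs_dirty so the
	 * new value is propagated back before the next VM-Enter.
	 */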
804
805 unsigned long cr0;
806 unsigned long cr0_guest_owned_bits;
807 unsigned long cr2;
808 unsigned long cr3;
809 unsigned long cr4;
810 unsigned long cr4_guest_owned_bits;
811 unsigned long cr4_guest_rsvd_bits;
812 unsigned long cr8;
813 u32 host_pkru;
814 u32 pkru;
815 u32 hflags;
816 u64 efer;
817 u64 host_debugctl;
818 u64 apic_base;
819 struct kvm_lapic *apic; /* kernel irqchip context */
820 bool load_eoi_exitmap_pending;
821 DECLARE_BITMAP(ioapic_handled_vectors, 256);
822 unsigned long apic_attention;
823 int32_t apic_arb_prio;
824 int mp_state;
825 u64 ia32_misc_enable_msr;
826 u64 smbase;
827 u64 smi_count;
828 bool at_instruction_boundary;
829 bool tpr_access_reporting;
830 bool xfd_no_write_intercept;
831 u64 microcode_version;
832 u64 arch_capabilities;
833 u64 perf_capabilities;
834
835 /*
836 * Paging state of the vcpu
837 *
838 * If the vcpu runs in guest mode with two level paging this still saves
839 * the paging mode of the l1 guest. This context is always used to
840 * handle faults.
841 */
842 struct kvm_mmu *mmu;
843
844 /* Non-nested MMU for L1 */
845 struct kvm_mmu root_mmu;
846
847 /* L1 MMU when running nested */
848 struct kvm_mmu guest_mmu;
849
850 /*
851 * Paging state of an L2 guest (used for nested npt)
852 *
853 * This context will save all necessary information to walk page tables
854 * of an L2 guest. This context is only initialized for page table
855 * walking and not for faulting since we never handle l2 page faults on
856 * the host.
857 */
858 struct kvm_mmu nested_mmu;
859
860 /*
861 * Pointer to the mmu context currently used for
862 * gva_to_gpa translations.
863 */
864 struct kvm_mmu *walk_mmu;
865
866 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
867 struct kvm_mmu_memory_cache mmu_shadow_page_cache;
868 struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
869 struct kvm_mmu_memory_cache mmu_page_header_cache;
870 /*
871	 * This cache is used to allocate external page tables, e.g. the private
872	 * EPT used by the TDX module.
873 */
874 struct kvm_mmu_memory_cache mmu_external_spt_cache;
875
876 /*
877 * QEMU userspace and the guest each have their own FPU state.
878 * In vcpu_run, we switch between the user and guest FPU contexts.
879 * While running a VCPU, the VCPU thread will have the guest FPU
880 * context.
881 *
882 * Note that while the PKRU state lives inside the fpu registers,
883 * it is switched out separately at VMENTER and VMEXIT time. The
884 * "guest_fpstate" state here contains the guest FPU context, with the
885	 * host PKRU bits.
886 */
887 struct fpu_guest guest_fpu;
888
889 u64 xcr0;
890 u64 guest_supported_xcr0;
891 u64 ia32_xss;
892 u64 guest_supported_xss;
893
894 struct kvm_pio_request pio;
895 void *pio_data;
896 void *sev_pio_data;
897 unsigned sev_pio_count;
898
899 u8 event_exit_inst_len;
900
901 bool exception_from_userspace;
902
903 /* Exceptions to be injected to the guest. */
904 struct kvm_queued_exception exception;
905 /* Exception VM-Exits to be synthesized to L1. */
906 struct kvm_queued_exception exception_vmexit;
907
908 struct kvm_queued_interrupt {
909 bool injected;
910 bool soft;
911 u8 nr;
912 } interrupt;
913
914 int halt_request; /* real mode on Intel only */
915
916 int cpuid_nent;
917 struct kvm_cpuid_entry2 *cpuid_entries;
918 bool cpuid_dynamic_bits_dirty;
919 bool is_amd_compatible;
920
921 /*
922 * cpu_caps holds the effective guest capabilities, i.e. the features
923 * the vCPU is allowed to use. Typically, but not always, features can
924 * be used by the guest if and only if both KVM and userspace want to
925 * expose the feature to the guest.
926 *
927 * A common exception is for virtualization holes, i.e. when KVM can't
928 * prevent the guest from using a feature, in which case the vCPU "has"
929 * the feature regardless of what KVM or userspace desires.
930 *
931 * Note, features that don't require KVM involvement in any way are
932 * NOT enforced/sanitized by KVM, i.e. are taken verbatim from the
933 * guest CPUID provided by userspace.
934 */
935 u32 cpu_caps[NR_KVM_CPU_CAPS];
936
937 u64 reserved_gpa_bits;
938 int maxphyaddr;
939
940 /* emulate context */
941
942 struct x86_emulate_ctxt *emulate_ctxt;
943 bool emulate_regs_need_sync_to_vcpu;
944 bool emulate_regs_need_sync_from_vcpu;
945 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
946 unsigned long cui_linear_rip;
947 int cui_rdmsr_imm_reg;
948
949 gpa_t time;
950 s8 pvclock_tsc_shift;
951 u32 pvclock_tsc_mul;
952 unsigned int hw_tsc_khz;
953 struct gfn_to_pfn_cache pv_time;
954 /* set guest stopped flag in pvclock flags field */
955 bool pvclock_set_guest_stopped_request;
956
957 struct {
958 u8 preempted;
959 u64 msr_val;
960 u64 last_steal;
961 struct gfn_to_hva_cache cache;
962 } st;
963
964 u64 l1_tsc_offset;
965 u64 tsc_offset; /* current tsc offset */
966 u64 last_guest_tsc;
967 u64 last_host_tsc;
968 u64 tsc_offset_adjustment;
969 u64 this_tsc_nsec;
970 u64 this_tsc_write;
971 u64 this_tsc_generation;
972 bool tsc_catchup;
973 bool tsc_always_catchup;
974 s8 virtual_tsc_shift;
975 u32 virtual_tsc_mult;
976 u32 virtual_tsc_khz;
977 s64 ia32_tsc_adjust_msr;
978 u64 msr_ia32_power_ctl;
979 u64 l1_tsc_scaling_ratio;
980 u64 tsc_scaling_ratio; /* current scaling ratio */
981
982 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
983 /* Number of NMIs pending injection, not including hardware vNMIs. */
984 unsigned int nmi_pending;
985 bool nmi_injected; /* Trying to inject an NMI this entry */
986 bool smi_pending; /* SMI queued after currently running handler */
987 u8 handling_intr_from_guest;
988
989 struct kvm_mtrr mtrr_state;
990 u64 pat;
991
992 unsigned switch_db_regs;
993 unsigned long db[KVM_NR_DB_REGS];
994 unsigned long dr6;
995 unsigned long dr7;
996 unsigned long eff_db[KVM_NR_DB_REGS];
997 unsigned long guest_debug_dr7;
998 u64 msr_platform_info;
999 u64 msr_misc_features_enables;
1000
1001 u64 mcg_cap;
1002 u64 mcg_status;
1003 u64 mcg_ctl;
1004 u64 mcg_ext_ctl;
1005 u64 *mce_banks;
1006 u64 *mci_ctl2_banks;
1007
1008 /* Cache MMIO info */
1009 u64 mmio_gva;
1010 unsigned mmio_access;
1011 gfn_t mmio_gfn;
1012 u64 mmio_gen;
1013
1014 struct kvm_pmu pmu;
1015
1016 /* used for guest single stepping over the given code position */
1017 unsigned long singlestep_rip;
1018
1019 #ifdef CONFIG_KVM_HYPERV
1020 bool hyperv_enabled;
1021 struct kvm_vcpu_hv *hyperv;
1022 #endif
1023 #ifdef CONFIG_KVM_XEN
1024 struct kvm_vcpu_xen xen;
1025 #endif
1026 cpumask_var_t wbinvd_dirty_mask;
1027
1028 unsigned long last_retry_eip;
1029 unsigned long last_retry_addr;
1030
1031 struct {
1032 bool halted;
1033 gfn_t gfns[ASYNC_PF_PER_VCPU];
1034 struct gfn_to_hva_cache data;
1035 u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
1036 u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
1037 u16 vec;
1038 u32 id;
1039 u32 host_apf_flags;
1040 bool send_always;
1041 bool delivery_as_pf_vmexit;
1042 bool pageready_pending;
1043 } apf;
1044
1045 /* OSVW MSRs (AMD only) */
1046 struct {
1047 u64 length;
1048 u64 status;
1049 } osvw;
1050
1051 struct {
1052 u64 msr_val;
1053 struct gfn_to_hva_cache data;
1054 } pv_eoi;
1055
1056 u64 msr_kvm_poll_control;
1057
1058 /* pv related host specific info */
1059 struct {
1060 bool pv_unhalted;
1061 } pv;
1062
1063 int pending_ioapic_eoi;
1064 int pending_external_vector;
1065 int highest_stale_pending_ioapic_eoi;
1066
1067	/* Whether the vCPU was preempted while in kernel mode (CPL = 0) */
1068 bool preempted_in_kernel;
1069
1070 /* Host CPU on which VM-entry was most recently attempted */
1071 int last_vmentry_cpu;
1072
1073 /* AMD MSRC001_0015 Hardware Configuration */
1074 u64 msr_hwcr;
1075
1076 /* pv related cpuid info */
1077 struct {
1078 /*
1079 * value of the eax register in the KVM_CPUID_FEATURES CPUID
1080 * leaf.
1081 */
1082 u32 features;
1083
1084 /*
1085 * indicates whether pv emulation should be disabled if features
1086 * are not present in the guest's cpuid
1087 */
1088 bool enforce;
1089 } pv_cpuid;
1090
1091 /* Protected Guests */
1092 bool guest_state_protected;
1093 bool guest_tsc_protected;
1094
1095 /*
1096	 * Set when the PDPTRs were loaded directly by userspace without
1097	 * reading guest memory.
1098 */
1099 bool pdptrs_from_userspace;
1100
1101 #if IS_ENABLED(CONFIG_HYPERV)
1102 hpa_t hv_root_tdp;
1103 #endif
1104 };
1105
1106 struct kvm_lpage_info {
1107 int disallow_lpage;
1108 };
1109
1110 struct kvm_arch_memory_slot {
1111 struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
1112 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
1113 unsigned short *gfn_write_track;
1114 };
1115
1116 /*
1117 * Track the mode of the optimized logical map, as the rules for decoding the
1118 * destination vary per mode. Enabling the optimized logical map requires all
1119 * software-enabled local APICs to be in the same mode, each addressable APIC to
1120 * be mapped to only one MDA, and each MDA to map to at most one APIC.
1121 */
1122 enum kvm_apic_logical_mode {
1123 /* All local APICs are software disabled. */
1124 KVM_APIC_MODE_SW_DISABLED,
1125 /* All software enabled local APICs in xAPIC cluster addressing mode. */
1126 KVM_APIC_MODE_XAPIC_CLUSTER,
1127 /* All software enabled local APICs in xAPIC flat addressing mode. */
1128 KVM_APIC_MODE_XAPIC_FLAT,
1129 /* All software enabled local APICs in x2APIC mode. */
1130 KVM_APIC_MODE_X2APIC,
1131 /*
1132 * Optimized map disabled, e.g. not all local APICs in the same logical
1133 * mode, same logical ID assigned to multiple APICs, etc.
1134 */
1135 KVM_APIC_MODE_MAP_DISABLED,
1136 };
1137
1138 struct kvm_apic_map {
1139 struct rcu_head rcu;
1140 enum kvm_apic_logical_mode logical_mode;
1141 u32 max_apic_id;
1142 union {
1143 struct kvm_lapic *xapic_flat_map[8];
1144 struct kvm_lapic *xapic_cluster_map[16][4];
1145 };
1146 struct kvm_lapic *phys_map[];
1147 };
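/*
 * The map sizes above follow from the xAPIC addressing rules: a flat-mode
 * logical ID is an 8-bit bitmap (at most 8 uniquely addressable APICs), while
 * cluster mode encodes 16 clusters of 4 APICs each; physical destinations
 * index phys_map[] directly by APIC ID, up to max_apic_id.
 */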
1148
1149 /* Hyper-V synthetic debugger (SynDbg) */
1150 struct kvm_hv_syndbg {
1151 struct {
1152 u64 control;
1153 u64 status;
1154 u64 send_page;
1155 u64 recv_page;
1156 u64 pending_page;
1157 } control;
1158 u64 options;
1159 };
1160
1161 /* Current state of Hyper-V TSC page clocksource */
1162 enum hv_tsc_page_status {
1163 /* TSC page was not set up or disabled */
1164 HV_TSC_PAGE_UNSET = 0,
1165 /* TSC page MSR was written by the guest, update pending */
1166 HV_TSC_PAGE_GUEST_CHANGED,
1167 /* TSC page update was triggered from the host side */
1168 HV_TSC_PAGE_HOST_CHANGED,
1169 /* TSC page was properly set up and is currently active */
1170 HV_TSC_PAGE_SET,
1171 /* TSC page was set up with an inaccessible GPA */
1172 HV_TSC_PAGE_BROKEN,
1173 };
1174
1175 #ifdef CONFIG_KVM_HYPERV
1176 /* Hyper-V emulation context */
1177 struct kvm_hv {
1178 struct mutex hv_lock;
1179 u64 hv_guest_os_id;
1180 u64 hv_hypercall;
1181 u64 hv_tsc_page;
1182 enum hv_tsc_page_status hv_tsc_page_status;
1183
1184 /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
1185 u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
1186 u64 hv_crash_ctl;
1187
1188 struct ms_hyperv_tsc_page tsc_ref;
1189
1190 struct idr conn_to_evt;
1191
1192 u64 hv_reenlightenment_control;
1193 u64 hv_tsc_emulation_control;
1194 u64 hv_tsc_emulation_status;
1195 u64 hv_invtsc_control;
1196
1197 /* How many vCPUs have VP index != vCPU index */
1198 atomic_t num_mismatched_vp_indexes;
1199
1200 /*
1201	 * How many SynICs use the 'AutoEOI' feature
1202 * (protected by arch.apicv_update_lock)
1203 */
1204 unsigned int synic_auto_eoi_used;
1205
1206 struct kvm_hv_syndbg hv_syndbg;
1207
1208 bool xsaves_xsavec_checked;
1209 };
1210 #endif
1211
1212 struct msr_bitmap_range {
1213 u32 flags;
1214 u32 nmsrs;
1215 u32 base;
1216 unsigned long *bitmap;
1217 };
1218
1219 #ifdef CONFIG_KVM_XEN
1220 /* Xen emulation context */
1221 struct kvm_xen {
1222 struct mutex xen_lock;
1223 u32 xen_version;
1224 bool long_mode;
1225 bool runstate_update_flag;
1226 u8 upcall_vector;
1227 struct gfn_to_pfn_cache shinfo_cache;
1228 struct idr evtchn_ports;
1229 unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
1230
1231 struct kvm_xen_hvm_config hvm_config;
1232 };
1233 #endif
1234
1235 enum kvm_irqchip_mode {
1236 KVM_IRQCHIP_NONE,
1237 #ifdef CONFIG_KVM_IOAPIC
1238 KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
1239 #endif
1240 KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
1241 };
1242
1243 enum kvm_suppress_eoi_broadcast_mode {
1244 KVM_SUPPRESS_EOI_BROADCAST_QUIRKED, /* Legacy behavior */
1245 KVM_SUPPRESS_EOI_BROADCAST_ENABLED, /* Enable Suppress EOI broadcast */
1246 KVM_SUPPRESS_EOI_BROADCAST_DISABLED /* Disable Suppress EOI broadcast */
1247 };
1248
1249 struct kvm_x86_msr_filter {
1250 u8 count;
1251 bool default_allow:1;
1252 struct msr_bitmap_range ranges[16];
1253 };
1254
1255 struct kvm_x86_pmu_event_filter {
1256 __u32 action;
1257 __u32 nevents;
1258 __u32 fixed_counter_bitmap;
1259 __u32 flags;
1260 __u32 nr_includes;
1261 __u32 nr_excludes;
1262 __u64 *includes;
1263 __u64 *excludes;
1264 __u64 events[];
1265 };
1266
1267 enum kvm_apicv_inhibit {
1268
1269 /********************************************************************/
1270 /* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
1271 /********************************************************************/
1272
1273 /*
1274 * APIC acceleration is disabled by a module parameter
1275 * and/or not supported in hardware.
1276 */
1277 APICV_INHIBIT_REASON_DISABLED,
1278
1279 /*
1280 * APIC acceleration is inhibited because AutoEOI feature is
1281 * being used by a HyperV guest.
1282 */
1283 APICV_INHIBIT_REASON_HYPERV,
1284
1285 /*
1286	 * APIC acceleration is inhibited because userspace has not yet
1287	 * enabled the kernel or split irqchip.
1288 */
1289 APICV_INHIBIT_REASON_ABSENT,
1290
1291 /* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
1292	 * (an out-of-band debug measure that blocks all interrupts on this vCPU)
1293 * was enabled, to avoid AVIC/APICv bypassing it.
1294 */
1295 APICV_INHIBIT_REASON_BLOCKIRQ,
1296
1297 /*
1298 * APICv is disabled because not all vCPUs have a 1:1 mapping between
1299 * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack.
1300 */
1301 APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED,
1302
1303 /*
1304	 * For simplicity, APIC acceleration is inhibited the
1305	 * first time either the APIC ID or the APIC base is changed by the guest
1306	 * from its reset value.
1307 */
1308 APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
1309 APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,
1310
1311 /******************************************************/
1312 /* INHIBITs that are relevant only to the AMD's AVIC. */
1313 /******************************************************/
1314
1315 /*
1316 * AVIC is inhibited on a vCPU because it runs a nested guest.
1317 *
1318 * This is needed because unlike APICv, the peers of this vCPU
1319 * cannot use the doorbell mechanism to signal interrupts via AVIC when
1320 * a vCPU runs nested.
1321 */
1322 APICV_INHIBIT_REASON_NESTED,
1323
1324 /*
1325	 * On SVM, the wait for the IRQ window is implemented with a pending vIRQ,
1326	 * which cannot be injected when AVIC is enabled; thus AVIC
1327	 * is inhibited while KVM waits for the IRQ window.
1328 */
1329 APICV_INHIBIT_REASON_IRQWIN,
1330
1331 /*
1332	 * PIT (i8254) 're-inject' mode relies on EOI intercept,
1333	 * which AVIC doesn't support for edge-triggered interrupts.
1334 */
1335 APICV_INHIBIT_REASON_PIT_REINJ,
1336
1337 /*
1338 * AVIC is disabled because SEV doesn't support it.
1339 */
1340 APICV_INHIBIT_REASON_SEV,
1341
1342 /*
1343 * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1
1344 * mapping between logical ID and vCPU.
1345 */
1346 APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,
1347
1348 /*
1349 * AVIC is disabled because the vCPU's APIC ID is beyond the max
1350 * supported by AVIC/x2AVIC, i.e. the vCPU is unaddressable.
1351 */
1352 APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG,
1353
1354 NR_APICV_INHIBIT_REASONS,
1355 };
1356
1357 #define __APICV_INHIBIT_REASON(reason) \
1358 { BIT(APICV_INHIBIT_REASON_##reason), #reason }
1359
1360 #define APICV_INHIBIT_REASONS \
1361 __APICV_INHIBIT_REASON(DISABLED), \
1362 __APICV_INHIBIT_REASON(HYPERV), \
1363 __APICV_INHIBIT_REASON(ABSENT), \
1364 __APICV_INHIBIT_REASON(BLOCKIRQ), \
1365 __APICV_INHIBIT_REASON(PHYSICAL_ID_ALIASED), \
1366 __APICV_INHIBIT_REASON(APIC_ID_MODIFIED), \
1367 __APICV_INHIBIT_REASON(APIC_BASE_MODIFIED), \
1368 __APICV_INHIBIT_REASON(NESTED), \
1369 __APICV_INHIBIT_REASON(IRQWIN), \
1370 __APICV_INHIBIT_REASON(PIT_REINJ), \
1371 __APICV_INHIBIT_REASON(SEV), \
1372 __APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED), \
1373 __APICV_INHIBIT_REASON(PHYSICAL_ID_TOO_BIG)
1374
1375 struct kvm_possible_nx_huge_pages {
1376 /*
1377 * A list of kvm_mmu_page structs that, if zapped, could possibly be
1378 * replaced by an NX huge page. A shadow page is on this list if its
1379 * existence disallows an NX huge page (nx_huge_page_disallowed is set)
1380 * and there are no other conditions that prevent a huge page, e.g.
1381	 * the backing host page is huge, dirty logging is not enabled for its
1382 * memslot, etc... Note, zapping shadow pages on this list doesn't
1383 * guarantee an NX huge page will be created in its stead, e.g. if the
1384 * guest attempts to execute from the region then KVM obviously can't
1385 * create an NX huge page (without hanging the guest).
1386 */
1387 struct list_head pages;
1388 u64 nr_pages;
1389 };
1390
1391 enum kvm_mmu_type {
1392 KVM_SHADOW_MMU,
1393 #ifdef CONFIG_X86_64
1394 KVM_TDP_MMU,
1395 #endif
1396 KVM_NR_MMU_TYPES,
1397 };
1398
1399 struct kvm_arch {
1400 unsigned long n_used_mmu_pages;
1401 unsigned long n_requested_mmu_pages;
1402 unsigned long n_max_mmu_pages;
1403 unsigned int indirect_shadow_pages;
1404 u8 mmu_valid_gen;
1405 u8 vm_type;
1406 bool has_private_mem;
1407 bool has_protected_state;
1408 bool has_protected_eoi;
1409 bool pre_fault_allowed;
1410 struct hlist_head *mmu_page_hash;
1411 struct list_head active_mmu_pages;
1412 struct kvm_possible_nx_huge_pages possible_nx_huge_pages[KVM_NR_MMU_TYPES];
1413 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1414 struct kvm_page_track_notifier_head track_notifier_head;
1415 #endif
1416 /*
1417 * Protects marking pages unsync during page faults, as TDP MMU page
1418 * faults only take mmu_lock for read. For simplicity, the unsync
1419 * pages lock is always taken when marking pages unsync regardless of
1420 * whether mmu_lock is held for read or write.
1421 */
1422 spinlock_t mmu_unsync_pages_lock;
1423
1424 u64 shadow_mmio_value;
1425
1426 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
1427 atomic_t noncoherent_dma_count;
1428 unsigned long nr_possible_bypass_irqs;
1429
1430 #ifdef CONFIG_KVM_IOAPIC
1431 struct kvm_pic *vpic;
1432 struct kvm_ioapic *vioapic;
1433 struct kvm_pit *vpit;
1434 #endif
1435 atomic_t vapics_in_nmi_mode;
1436 struct mutex apic_map_lock;
1437 struct kvm_apic_map __rcu *apic_map;
1438 atomic_t apic_map_dirty;
1439
1440 bool apic_access_memslot_enabled;
1441 bool apic_access_memslot_inhibited;
1442
1443 /* Protects apicv_inhibit_reasons */
1444 struct rw_semaphore apicv_update_lock;
1445 unsigned long apicv_inhibit_reasons;
1446
1447 gpa_t wall_clock;
1448
1449 u64 disabled_exits;
1450
1451 s64 kvmclock_offset;
1452
1453 /*
1454 * This also protects nr_vcpus_matched_tsc which is read from a
1455 * preemption-disabled region, so it must be a raw spinlock.
1456 */
1457 raw_spinlock_t tsc_write_lock;
1458 u64 last_tsc_nsec;
1459 u64 last_tsc_write;
1460 u32 last_tsc_khz;
1461 u64 last_tsc_offset;
1462 u64 cur_tsc_nsec;
1463 u64 cur_tsc_write;
1464 u64 cur_tsc_offset;
1465 u64 cur_tsc_generation;
1466 int nr_vcpus_matched_tsc;
1467
1468 u32 default_tsc_khz;
1469 bool user_set_tsc;
1470 u64 apic_bus_cycle_ns;
1471
1472 seqcount_raw_spinlock_t pvclock_sc;
1473 bool use_master_clock;
1474 u64 master_kernel_ns;
1475 u64 master_cycle_now;
1476
1477 #ifdef CONFIG_KVM_HYPERV
1478 struct kvm_hv hyperv;
1479 #endif
1480
1481 #ifdef CONFIG_KVM_XEN
1482 struct kvm_xen xen;
1483 #endif
1484
1485 bool backwards_tsc_observed;
1486 bool boot_vcpu_runs_old_kvmclock;
1487 u32 bsp_vcpu_id;
1488
1489 u64 disabled_quirks;
1490
1491 enum kvm_irqchip_mode irqchip_mode;
1492 u8 nr_reserved_ioapic_pins;
1493
1494 bool disabled_lapic_found;
1495
1496 bool x2apic_format;
1497 bool x2apic_broadcast_quirk_disabled;
1498 enum kvm_suppress_eoi_broadcast_mode suppress_eoi_broadcast_mode;
1499
1500 bool has_mapped_host_mmio;
1501 bool guest_can_read_msr_platform_info;
1502 bool exception_payload_enabled;
1503
1504 bool triple_fault_event;
1505
1506 bool bus_lock_detection_enabled;
1507 bool enable_pmu;
1508 bool created_mediated_pmu;
1509
1510 u32 notify_window;
1511 u32 notify_vmexit_flags;
1512 /*
1513 * If exit_on_emulation_error is set, and the in-kernel instruction
1514 * emulator fails to emulate an instruction, allow userspace
1515 * the opportunity to look at it.
1516 */
1517 bool exit_on_emulation_error;
1518
1519 /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
1520 u32 user_space_msr_mask;
1521 struct kvm_x86_msr_filter __rcu *msr_filter;
1522
1523 u32 hypercall_exit_enabled;
1524
1525 /* Guest can access the SGX PROVISIONKEY. */
1526 bool sgx_provisioning_allowed;
1527
1528 struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
1529 struct vhost_task *nx_huge_page_recovery_thread;
1530 u64 nx_huge_page_last;
1531 struct once nx_once;
1532
1533 #ifdef CONFIG_X86_64
1534 #ifdef CONFIG_KVM_PROVE_MMU
1535 /*
1536 * The number of TDP MMU pages across all roots. Used only to sanity
1537 * check that KVM isn't leaking TDP MMU pages.
1538 */
1539 atomic64_t tdp_mmu_pages;
1540 #endif
1541
1542 /*
1543 * List of struct kvm_mmu_pages being used as roots.
1544 * All struct kvm_mmu_pages in the list should have
1545 * tdp_mmu_page set.
1546 *
1547 * For reads, this list is protected by:
1548 * RCU alone or
1549 * the MMU lock in read mode + RCU or
1550 * the MMU lock in write mode
1551 *
1552 * For writes, this list is protected by tdp_mmu_pages_lock; see
1553 * below for the details.
1554 *
1555 * Roots will remain in the list until their tdp_mmu_root_count
1556 * drops to zero, at which point the thread that decremented the
1557	 * count to zero should remove the root from the list and clean
1558 * it up, freeing the root after an RCU grace period.
1559 */
1560 struct list_head tdp_mmu_roots;
1561
1562 /*
1563 * Protects accesses to the following fields when the MMU lock
1564 * is held in read mode:
1565 * - tdp_mmu_roots (above)
1566 * - the link field of kvm_mmu_page structs used by the TDP MMU
1567 * - possible_nx_huge_pages[KVM_TDP_MMU];
1568 * - the possible_nx_huge_page_link field of kvm_mmu_page structs used
1569 * by the TDP MMU
1570 * Because the lock is only taken within the MMU lock, strictly
1571 * speaking it is redundant to acquire this lock when the thread
1572 * holds the MMU lock in write mode. However it often simplifies
1573 * the code to do so.
1574 */
1575 spinlock_t tdp_mmu_pages_lock;
1576 #endif /* CONFIG_X86_64 */
1577
1578 /*
1579 * If set, at least one shadow root has been allocated. This flag
1580 * is used as one input when determining whether certain memslot
1581 * related allocations are necessary.
1582 */
1583 bool shadow_root_allocated;
1584
1585 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1586 /*
1587 * If set, the VM has (or had) an external write tracking user, and
1588 * thus all write tracking metadata has been allocated, even if KVM
1589 * itself isn't using write tracking.
1590 */
1591 bool external_write_tracking_enabled;
1592 #endif
1593
1594 #if IS_ENABLED(CONFIG_HYPERV)
1595 hpa_t hv_root_tdp;
1596 spinlock_t hv_root_tdp_lock;
1597 struct hv_partition_assist_pg *hv_pa_pg;
1598 #endif
1599 /*
1600 * VM-scope maximum vCPU ID. Used to determine the size of structures
1601 * that increase along with the maximum vCPU ID, in which case, using
1602 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
1603 */
1604 u32 max_vcpu_ids;
1605
1606 bool disable_nx_huge_pages;
1607
1608 /*
1609 * Memory caches used to allocate shadow pages when performing eager
1610 * page splitting. No need for a shadowed_info_cache since eager page
1611 * splitting only allocates direct shadow pages.
1612 *
1613 * Protected by kvm->slots_lock.
1614 */
1615 struct kvm_mmu_memory_cache split_shadow_page_cache;
1616 struct kvm_mmu_memory_cache split_page_header_cache;
1617
1618 /*
1619 * Memory cache used to allocate pte_list_desc structs while splitting
1620 * huge pages. In the worst case, to split one huge page, 512
1621 * pte_list_desc structs are needed to add each lower level leaf sptep
1622 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
1623 * page table.
1624 *
1625 * Protected by kvm->slots_lock.
1626 */
1627 #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
1628 struct kvm_mmu_memory_cache split_desc_cache;
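	/*
	 * I.e. with SPTE_ENT_PER_PAGE == 512, the cache above must hold at
	 * least 513 objects before a huge page can be split.
	 */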
1629
1630 gfn_t gfn_direct_bits;
1631
1632 /*
1633	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
1634	 * value indicates CPU dirty logging is unsupported or disabled in the
1635	 * current VM.
1636 */
1637 int cpu_dirty_log_size;
1638 };
1639
1640 struct kvm_vm_stat {
1641 struct kvm_vm_stat_generic generic;
1642 u64 mmu_shadow_zapped;
1643 u64 mmu_pte_write;
1644 u64 mmu_pde_zapped;
1645 u64 mmu_flooded;
1646 u64 mmu_recycled;
1647 u64 mmu_cache_miss;
1648 u64 mmu_unsync;
1649 union {
1650 struct {
1651 atomic64_t pages_4k;
1652 atomic64_t pages_2m;
1653 atomic64_t pages_1g;
1654 };
1655 atomic64_t pages[KVM_NR_PAGE_SIZES];
1656 };
1657 u64 nx_lpage_splits;
1658 u64 max_mmu_page_hash_collisions;
1659 u64 max_mmu_rmap_size;
1660 };
1661
1662 struct kvm_vcpu_stat {
1663 struct kvm_vcpu_stat_generic generic;
1664 u64 pf_taken;
1665 u64 pf_fixed;
1666 u64 pf_emulate;
1667 u64 pf_spurious;
1668 u64 pf_fast;
1669 u64 pf_mmio_spte_created;
1670 u64 pf_guest;
1671 u64 tlb_flush;
1672 u64 invlpg;
1673
1674 u64 exits;
1675 u64 io_exits;
1676 u64 mmio_exits;
1677 u64 signal_exits;
1678 u64 irq_window_exits;
1679 u64 nmi_window_exits;
1680 u64 l1d_flush;
1681 u64 halt_exits;
1682 u64 request_irq_exits;
1683 u64 irq_exits;
1684 u64 host_state_reload;
1685 u64 fpu_reload;
1686 u64 insn_emulation;
1687 u64 insn_emulation_fail;
1688 u64 hypercalls;
1689 u64 irq_injections;
1690 u64 nmi_injections;
1691 u64 req_event;
1692 u64 nested_run;
1693 u64 directed_yield_attempted;
1694 u64 directed_yield_successful;
1695 u64 preemption_reported;
1696 u64 preemption_other;
1697 u64 guest_mode;
1698 u64 notify_window_exits;
1699 };
1700
1701 struct x86_instruction_info;
1702
1703 struct msr_data {
1704 bool host_initiated;
1705 u32 index;
1706 u64 data;
1707 };
1708
1709 struct kvm_lapic_irq {
1710 u32 vector;
1711 u16 delivery_mode;
1712 u16 dest_mode;
1713 bool level;
1714 u16 trig_mode;
1715 u32 shorthand;
1716 u32 dest_id;
1717 bool msi_redir_hint;
1718 };
1719
1720 static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
1721 {
1722 return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
1723 }
1724
1725 enum kvm_x86_run_flags {
1726 KVM_RUN_FORCE_IMMEDIATE_EXIT = BIT(0),
1727 KVM_RUN_LOAD_GUEST_DR6 = BIT(1),
1728 KVM_RUN_LOAD_DEBUGCTL = BIT(2),
1729 };

struct kvm_x86_ops {
	const char *name;

	int (*check_processor_compatibility)(void);

	int (*enable_virtualization_cpu)(void);
	void (*disable_virtualization_cpu)(void);
	cpu_emergency_virt_cb *emergency_disable_virtualization_cpu;

	void (*hardware_unsetup)(void);
	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);

	unsigned int vm_size;
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);
	void (*vm_pre_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_precreate)(struct kvm *kvm);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	/*
	 * Mask of DEBUGCTL bits that are owned by the host, i.e. that need to
	 * match the host's value even while the guest is active.
	 */
	const u64 HOST_OWNED_DEBUGCTL;

	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	int (*get_cpl_no_cache)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	bool (*get_if_flag)(struct kvm_vcpu *vcpu);

	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
#if IS_ENABLED(CONFIG_HYPERV)
	int  (*flush_remote_tlbs)(struct kvm *kvm);
	int  (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
					gfn_t nr_pages);
#endif

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	/*
	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
	 * does not need to flush GPA->HPA mappings.
	 */
	void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);

	int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
	enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
						  u64 run_flags);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
			   enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
	void (*inject_nmi)(struct kvm_vcpu *vcpu);
	void (*inject_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	/* Whether or not a virtual NMI is pending in hardware. */
	bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
	/*
	 * Attempt to pend a virtual NMI in hardware.  Returns %true on success
	 * to allow using static_call_ret0 as the fallback.
	 */
	bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);

	const bool x2apic_icr_is_split;
	const unsigned long required_apicv_inhibits;
	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
	void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
				  int trig_mode, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level);

	/* Update external mapping with page table link. */
	int (*link_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
				 void *external_spt);
	/* Update the external page table from spte getting set. */
	int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
				 u64 mirror_spte);

	/* Update external page tables for page table about to be freed. */
	int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
				 void *external_spt);

	/* Update external page table from spte getting removed, and flush TLB. */
	void (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
				     u64 mirror_spte);

	bool (*has_wbinvd_exit)(void);

	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);

	/*
	 * Retrieve somewhat arbitrary exit/entry information.  Intended to
	 * be used only from within tracepoints or error paths.
	 */
	void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code);

	void (*get_entry_info)(struct kvm_vcpu *vcpu,
			       u32 *intr_info, u32 *error_code);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);

	const struct kvm_x86_nested_ops *nested_ops;

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
			      unsigned int host_irq, uint32_t guest_irq,
			      struct kvm_vcpu *vcpu, u32 vector);
	void (*pi_start_bypass)(struct kvm *kvm);
	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
	bool (*protected_apic_has_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_SMM
	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
	int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
#endif

	int (*dev_get_attr)(u32 group, u64 attr, u64 *val);
	int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
	int (*vcpu_mem_enc_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
	int (*vcpu_mem_enc_unlocked_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
	int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	void (*guest_memory_reclaimed)(struct kvm *kvm);

	int (*get_feature_msr)(u32 msr, u64 *data);

	int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
					 void *insn, int insn_len);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);

	void (*migrate_timers)(struct kvm_vcpu *vcpu);
	void (*recalc_intercepts)(struct kvm_vcpu *vcpu);
	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);

	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);

	/*
	 * Returns vCPU specific APICv inhibit reasons
	 */
	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);

	gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
	void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
	int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
	void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
	int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
};

struct kvm_x86_nested_ops {
	void (*leave_nested)(struct kvm_vcpu *vcpu);
	bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
				    u32 error_code);
	int (*check_events)(struct kvm_vcpu *vcpu);
	bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
	void (*triple_fault)(struct kvm_vcpu *vcpu);
	int (*get_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 unsigned user_data_size);
	int (*set_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 struct kvm_nested_state *kvm_state);
	bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
			    uint16_t *vmcs_version);
	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
	void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu);
};

struct kvm_x86_init_ops {
	int (*hardware_setup)(void);
	unsigned int (*handle_intel_pt_intr)(void);

	struct kvm_x86_ops *runtime_ops;
	struct kvm_pmu_ops *pmu_ops;
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
	u64 error_code;
};

extern u32 __read_mostly kvm_nr_uret_msrs;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern bool __read_mostly enable_apicv;
extern bool __read_mostly enable_ipiv;
extern bool __read_mostly enable_device_posted_irqs;
extern struct kvm_x86_ops kvm_x86_ops;

#define kvm_x86_call(func)	static_call(kvm_x86_##func)
#define kvm_pmu_call(func)	static_call(kvm_x86_pmu_##func)

#define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
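
/*
 * Illustrative sketch (not part of this header): each hook in kvm_x86_ops has
 * a matching static call named kvm_x86_<hook>, declared by expanding
 * <asm/kvm-x86-ops.h> through KVM_X86_OP() above.  Callers are expected to go
 * through the kvm_x86_call() wrapper instead of dereferencing kvm_x86_ops
 * directly, e.g.:
 *
 *	int cpl = kvm_x86_call(get_cpl)(vcpu);
 *
 * which resolves to static_call(kvm_x86_get_cpl)(vcpu) and is patched to the
 * vendor (VMX/SVM) implementation when the vendor module registers its ops.
 */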

int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
void kvm_x86_vendor_exit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvzalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT);
}

#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);

#if IS_ENABLED(CONFIG_HYPERV)
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	if (kvm_x86_ops.flush_remote_tlbs &&
	    !kvm_x86_call(flush_remote_tlbs)(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
						   u64 nr_pages)
{
	if (!kvm_x86_ops.flush_remote_tlbs_range)
		return -EOPNOTSUPP;

	return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages);
}
#endif /* CONFIG_HYPERV */

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

/* Enable perf NMI and timer modes to work, and minimise false positives. */
#define kvm_arch_pmi_in_guest(vcpu) \
	((vcpu) && (vcpu)->arch.handling_intr_from_guest && \
	 (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI)))
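
/*
 * Worked reading of the check above (commentary only, assuming a non-NULL
 * vcpu): a PMI is attributed to the guest only when the context of the perf
 * interrupt matches the type of VM-exit the vCPU is handling:
 *
 *	in_nmi()   handling_intr_from_guest   kvm_arch_pmi_in_guest()
 *	true       KVM_HANDLING_NMI           true
 *	true       KVM_HANDLING_IRQ           false
 *	false      KVM_HANDLING_IRQ           true
 *	any        0 (not in a guest exit)    false
 */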

void __init kvm_mmu_x86_module_init(void);
int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);

void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
					    struct kvm_memory_slot *slot);

void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level);
void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
				       const struct kvm_memory_slot *memslot,
				       int target_level);
void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  u64 start, u64 end,
				  int target_level);
void kvm_mmu_recover_huge_pages(struct kvm *kvm,
				const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops.skip_emulated_instruction() implementations if
 *		   EMULTYPE_COMPLETE_USER_EXIT is not set.
 *
 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
 *			     retry native execution under certain conditions.
 *			     Can only be set in conjunction with EMULTYPE_PF.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 *
 * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case
 *		 the CR2/GPA value passed on the stack is valid.
 *
 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
 *				 state and inject single-step #DBs after skipping
 *				 an instruction (after completing userspace I/O).
 *
 * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
 *			     is attempting to write a gfn that contains one or
 *			     more of the PTEs used to translate the write itself,
 *			     and the owning page table is being shadowed by KVM.
 *			     If emulation of the faulting instruction fails and
 *			     this flag is set, KVM will exit to userspace instead
 *			     of retrying emulation as KVM cannot make forward
 *			     progress.
 *
 *			     If emulation fails for a write to guest page tables,
 *			     KVM unprotects (zaps) the shadow page for the target
 *			     gfn and resumes the guest to retry the non-emulatable
 *			     instruction (on hardware).  Unprotecting the gfn
 *			     doesn't allow forward progress for a self-changing
 *			     access because doing so also zaps the translation for
 *			     the gfn, i.e. retrying the instruction will hit a
 *			     !PRESENT fault, which results in a new shadow page
 *			     and sends KVM back to square one.
 *
 * EMULTYPE_SKIP_SOFT_INT - Set in combination with EMULTYPE_SKIP to only skip
 *			    an instruction if it could generate a given software
 *			    interrupt, which must be encoded via
 *			    EMULTYPE_SET_SOFT_INT_VECTOR().
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY_PF	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
#define EMULTYPE_PF		    (1 << 6)
#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
#define EMULTYPE_WRITE_PF_TO_SP	    (1 << 8)
#define EMULTYPE_SKIP_SOFT_INT	    (1 << 9)

#define EMULTYPE_SET_SOFT_INT_VECTOR(v)	((u32)((v) & 0xff) << 16)
#define EMULTYPE_GET_SOFT_INT_VECTOR(e)	(((e) >> 16) & 0xff)
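
/*
 * Illustrative sketch (not part of this header): a caller that needs to skip
 * an instruction only if it would raise a known software interrupt could
 * combine the flags above as follows; "vector" is a local variable used
 * purely for illustration:
 *
 *	int emul_type = EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT |
 *			EMULTYPE_SET_SOFT_INT_VECTOR(vector);
 *
 * The emulator side can then recover the vector with
 * EMULTYPE_GET_SOFT_INT_VECTOR(emul_type).
 */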

static inline bool kvm_can_emulate_event_vectoring(int emul_type)
{
	return !(emul_type & EMULTYPE_PF);
}

int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);
void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
					  u64 *data, u8 ndata);
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);

void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
void kvm_prepare_unexpected_reason_exit(struct kvm_vcpu *vcpu, u64 exit_reason);

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
int kvm_emulate_invd(struct kvm_vcpu *vcpu);
int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
int kvm_emulate_monitor(struct kvm_vcpu *vcpu);

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
			   bool has_error_code, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu);
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);

void kvm_update_dr7(struct kvm_vcpu *vcpu);

bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				       bool always_retry);

static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
						   gpa_t cr2_or_gpa)
{
	return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
}

void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
			ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

bool kvm_apicv_activated(struct kvm *kvm);
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set);
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set);

static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
					 enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
}

static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
					   enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}
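
/*
 * Illustrative sketch (not part of this header): code that must temporarily
 * force APICv off while some incompatible condition is in effect pairs the
 * two helpers above; the inhibit reason below is only an example value from
 * enum kvm_apicv_inhibit:
 *
 *	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ);
 *	...
 *	kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ);
 */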

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			     u64 addr, unsigned long roots);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);

void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level);


#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
#endif

#define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrq(msr, value);
	return value;
}
#endif

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
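
/*
 * Worked out (commentary only): RMODE_TSS_SIZE = 0x68 + (256 / 8) +
 * (65536 / 8) + 1 = 104 + 32 + 8192 + 1 = 8329 bytes, i.e. the base TSS, the
 * interrupt redirection bitmap, the I/O permission bitmap, and the trailing
 * terminator byte that must follow the I/O bitmap.
 */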

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GUEST_MASK		(1 << 0) /* VCPU is in guest-mode */

#ifdef CONFIG_KVM_SMM
#define HF_SMM_MASK		(1 << 1)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 2)

# define KVM_MAX_NR_ADDRESS_SPACES	2
/* SMM is currently unsupported for guests with private memory. */
# define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
# define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
#else
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
#endif
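
/*
 * Illustrative reading (commentary only): with CONFIG_KVM_SMM, each VM keeps
 * two memslot address spaces, index 0 for normal operation and index 1 for
 * SMM.  A vCPU with HF_SMM_MASK set therefore resolves gfn->hva translations
 * against the SMM set, selected via kvm_arch_vcpu_memslots_id() above.
 */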

int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_extint(struct kvm_vcpu *v);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
u64 kvm_get_user_return_msr(unsigned int slot);

static inline bool kvm_is_supported_user_return_msr(u32 msr)
{
	return kvm_find_user_return_msr(msr) >= 0;
}
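
/*
 * Illustrative sketch (not part of this header): vendor code typically
 * registers a user-return MSR once at setup and then updates it on the entry
 * path, with the host value restored lazily on return to userspace.  The MSR
 * and "guest_val" below are examples only:
 *
 *	int slot = kvm_add_user_return_msr(MSR_TSC_AUX);   // setup
 *	...
 *	kvm_set_user_return_msr(slot, guest_val, -1ull);   // before VM-entry
 */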

u64 kvm_scale_tsc(u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
				     u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == APIC_DM_FIXED ||
		irq->delivery_mode == APIC_DM_LOWEST);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	kvm_x86_call(vcpu_blocking)(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_x86_call(vcpu_unblocking)(vcpu);
}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);

#define KVM_CLOCK_VALID_FLAGS						\
	(KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)

#define KVM_X86_VALID_QUIRKS			\
	(KVM_X86_QUIRK_LINT0_REENABLED |	\
	 KVM_X86_QUIRK_CD_NW_CLEARED |		\
	 KVM_X86_QUIRK_LAPIC_MMIO_HOLE |	\
	 KVM_X86_QUIRK_OUT_7E_INC_RIP |		\
	 KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT |	\
	 KVM_X86_QUIRK_FIX_HYPERCALL_INSN |	\
	 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS |	\
	 KVM_X86_QUIRK_SLOT_ZAP_ALL |		\
	 KVM_X86_QUIRK_STUFF_FEATURE_MSRS |	\
	 KVM_X86_QUIRK_IGNORE_GUEST_PAT |	\
	 KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM)

#define KVM_X86_CONDITIONAL_QUIRKS		\
	(KVM_X86_QUIRK_CD_NW_CLEARED |		\
	 KVM_X86_QUIRK_IGNORE_GUEST_PAT)

/*
 * KVM previously used a u32 field in kvm_run to indicate the hypercall was
 * initiated from long mode.  KVM now sets bit 0 to indicate long mode, but the
 * remaining 31 lower bits must be 0 to preserve ABI.
 */
#define KVM_EXIT_HYPERCALL_MBZ		GENMASK_ULL(31, 1)

static inline bool kvm_arch_has_irq_bypass(void)
{
	return enable_device_posted_irqs;
}

#endif /* _ASM_X86_KVM_HOST_H */