xref: /linux/arch/x86/include/asm/kvm_host.h (revision c924c5e9b8c65b3a479a90e5e37d74cc8cd9fe0a)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This header defines architecture specific interfaces, x86 version
6  */
7 
8 #ifndef _ASM_X86_KVM_HOST_H
9 #define _ASM_X86_KVM_HOST_H
10 
11 #include <linux/types.h>
12 #include <linux/mm.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/tracepoint.h>
15 #include <linux/cpumask.h>
16 #include <linux/irq_work.h>
17 #include <linux/irq.h>
18 #include <linux/workqueue.h>
19 
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
22 #include <linux/kvm_types.h>
23 #include <linux/perf_event.h>
24 #include <linux/pvclock_gtod.h>
25 #include <linux/clocksource.h>
26 #include <linux/irqbypass.h>
27 #include <linux/kfifo.h>
28 #include <linux/sched/vhost_task.h>
29 #include <linux/call_once.h>
30 #include <linux/atomic.h>
31 
32 #include <asm/apic.h>
33 #include <asm/pvclock-abi.h>
34 #include <asm/desc.h>
35 #include <asm/mtrr.h>
36 #include <asm/msr-index.h>
37 #include <asm/asm.h>
38 #include <asm/kvm_page_track.h>
39 #include <asm/kvm_vcpu_regs.h>
40 #include <asm/reboot.h>
41 #include <hyperv/hvhdk.h>
42 
43 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
44 
45 /*
46  * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if
47  * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
48  */
49 #ifdef CONFIG_KVM_MAX_NR_VCPUS
50 #define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS
51 #else
52 #define KVM_MAX_VCPUS 1024
53 #endif
54 
55 /*
56  * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
57  * might be larger than the actual number of VCPUs because the
58  * APIC ID encodes CPU topology information.
59  *
60  * In the worst case, we'll need less than one extra bit for the
61  * Core ID, and less than one extra bit for the Package (Die) ID,
62  * so ratio of 4 should be enough.
63  */
64 #define KVM_VCPU_ID_RATIO 4
65 #define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
66 
67 /* memory slots that are not exposed to userspace */
68 #define KVM_INTERNAL_MEM_SLOTS 3
69 
70 #define KVM_HALT_POLL_NS_DEFAULT 200000
71 
72 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
73 
74 #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
75 					KVM_DIRTY_LOG_INITIALLY_SET)
76 
77 #define KVM_BUS_LOCK_DETECTION_VALID_MODE	(KVM_BUS_LOCK_DETECTION_OFF | \
78 						 KVM_BUS_LOCK_DETECTION_EXIT)
79 
80 #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS	(KVM_X86_NOTIFY_VMEXIT_ENABLED | \
81 						 KVM_X86_NOTIFY_VMEXIT_USER)
82 
83 /* x86-specific vcpu->requests bit members */
84 #define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
85 #define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
86 #define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
87 #define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
88 #define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
89 #define KVM_REQ_LOAD_MMU_PGD		KVM_ARCH_REQ(5)
90 #define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
91 #define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
92 #define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
93 #define KVM_REQ_NMI			KVM_ARCH_REQ(9)
94 #define KVM_REQ_PMU			KVM_ARCH_REQ(10)
95 #define KVM_REQ_PMI			KVM_ARCH_REQ(11)
96 #ifdef CONFIG_KVM_SMM
97 #define KVM_REQ_SMI			KVM_ARCH_REQ(12)
98 #endif
99 #define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
100 #define KVM_REQ_MCLOCK_INPROGRESS \
101 	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
102 #define KVM_REQ_SCAN_IOAPIC \
103 	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
104 #define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
105 #define KVM_REQ_APIC_PAGE_RELOAD \
106 	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
107 #define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
108 #define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
109 #define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
110 #define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
111 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
112 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
113 #define KVM_REQ_GET_NESTED_STATE_PAGES	KVM_ARCH_REQ(24)
114 #define KVM_REQ_APICV_UPDATE \
115 	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
116 #define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
117 #define KVM_REQ_TLB_FLUSH_GUEST \
118 	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
119 #define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
120 #define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
121 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
122 	KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
123 #define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
124 	KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
125 #define KVM_REQ_HV_TLB_FLUSH \
126 	KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
127 #define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE	KVM_ARCH_REQ(34)
128 
129 #define CR0_RESERVED_BITS                                               \
130 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
131 			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
132 			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
133 
134 #define CR4_RESERVED_BITS                                               \
135 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
136 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
137 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
138 			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
139 			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
140 			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
141 			  | X86_CR4_LAM_SUP))
142 
143 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
144 
145 
146 
147 #define INVALID_PAGE (~(hpa_t)0)
148 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
149 
150 /* KVM Hugepage definitions for x86 */
151 #define KVM_MAX_HUGEPAGE_LEVEL	PG_LEVEL_1G
152 #define KVM_NR_PAGE_SIZES	(KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
153 #define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
154 #define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
155 #define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
156 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
157 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
158 
159 #define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
160 #define KVM_MIN_ALLOC_MMU_PAGES 64UL
161 #define KVM_MMU_HASH_SHIFT 12
162 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
163 #define KVM_MIN_FREE_MMU_PAGES 5
164 #define KVM_REFILL_PAGES 25
165 #define KVM_MAX_CPUID_ENTRIES 256
166 #define KVM_NR_VAR_MTRR 8
167 
168 #define ASYNC_PF_PER_VCPU 64
169 
170 enum kvm_reg {
171 	VCPU_REGS_RAX = __VCPU_REGS_RAX,
172 	VCPU_REGS_RCX = __VCPU_REGS_RCX,
173 	VCPU_REGS_RDX = __VCPU_REGS_RDX,
174 	VCPU_REGS_RBX = __VCPU_REGS_RBX,
175 	VCPU_REGS_RSP = __VCPU_REGS_RSP,
176 	VCPU_REGS_RBP = __VCPU_REGS_RBP,
177 	VCPU_REGS_RSI = __VCPU_REGS_RSI,
178 	VCPU_REGS_RDI = __VCPU_REGS_RDI,
179 #ifdef CONFIG_X86_64
180 	VCPU_REGS_R8  = __VCPU_REGS_R8,
181 	VCPU_REGS_R9  = __VCPU_REGS_R9,
182 	VCPU_REGS_R10 = __VCPU_REGS_R10,
183 	VCPU_REGS_R11 = __VCPU_REGS_R11,
184 	VCPU_REGS_R12 = __VCPU_REGS_R12,
185 	VCPU_REGS_R13 = __VCPU_REGS_R13,
186 	VCPU_REGS_R14 = __VCPU_REGS_R14,
187 	VCPU_REGS_R15 = __VCPU_REGS_R15,
188 #endif
189 	VCPU_REGS_RIP,
190 	NR_VCPU_REGS,
191 
192 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
193 	VCPU_EXREG_CR0,
194 	VCPU_EXREG_CR3,
195 	VCPU_EXREG_CR4,
196 	VCPU_EXREG_RFLAGS,
197 	VCPU_EXREG_SEGMENTS,
198 	VCPU_EXREG_EXIT_INFO_1,
199 	VCPU_EXREG_EXIT_INFO_2,
200 };
201 
202 enum {
203 	VCPU_SREG_ES,
204 	VCPU_SREG_CS,
205 	VCPU_SREG_SS,
206 	VCPU_SREG_DS,
207 	VCPU_SREG_FS,
208 	VCPU_SREG_GS,
209 	VCPU_SREG_TR,
210 	VCPU_SREG_LDTR,
211 };
212 
213 enum exit_fastpath_completion {
214 	EXIT_FASTPATH_NONE,
215 	EXIT_FASTPATH_REENTER_GUEST,
216 	EXIT_FASTPATH_EXIT_HANDLED,
217 	EXIT_FASTPATH_EXIT_USERSPACE,
218 };
219 typedef enum exit_fastpath_completion fastpath_t;
220 
221 struct x86_emulate_ctxt;
222 struct x86_exception;
223 union kvm_smram;
224 enum x86_intercept;
225 enum x86_intercept_stage;
226 
227 #define KVM_NR_DB_REGS	4
228 
229 #define DR6_BUS_LOCK   (1 << 11)
230 #define DR6_BD		(1 << 13)
231 #define DR6_BS		(1 << 14)
232 #define DR6_BT		(1 << 15)
233 #define DR6_RTM		(1 << 16)
234 /*
235  * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
236  * We can regard all the bits in DR6_FIXED_1 as active_low bits;
237  * they will never be 0 for now, but when they are defined
238  * in the future it will require no code change.
239  *
240  * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
241  */
242 #define DR6_ACTIVE_LOW	0xffff0ff0
243 #define DR6_VOLATILE	0x0001e80f
244 #define DR6_FIXED_1	(DR6_ACTIVE_LOW & ~DR6_VOLATILE)
245 
246 #define DR7_BP_EN_MASK	0x000000ff
247 #define DR7_GE		(1 << 9)
248 #define DR7_GD		(1 << 13)
249 #define DR7_FIXED_1	0x00000400
250 #define DR7_VOLATILE	0xffff2bff
251 
252 #define KVM_GUESTDBG_VALID_MASK \
253 	(KVM_GUESTDBG_ENABLE | \
254 	KVM_GUESTDBG_SINGLESTEP | \
255 	KVM_GUESTDBG_USE_HW_BP | \
256 	KVM_GUESTDBG_USE_SW_BP | \
257 	KVM_GUESTDBG_INJECT_BP | \
258 	KVM_GUESTDBG_INJECT_DB | \
259 	KVM_GUESTDBG_BLOCKIRQ)
260 
261 #define PFERR_PRESENT_MASK	BIT(0)
262 #define PFERR_WRITE_MASK	BIT(1)
263 #define PFERR_USER_MASK		BIT(2)
264 #define PFERR_RSVD_MASK		BIT(3)
265 #define PFERR_FETCH_MASK	BIT(4)
266 #define PFERR_PK_MASK		BIT(5)
267 #define PFERR_SGX_MASK		BIT(15)
268 #define PFERR_GUEST_RMP_MASK	BIT_ULL(31)
269 #define PFERR_GUEST_FINAL_MASK	BIT_ULL(32)
270 #define PFERR_GUEST_PAGE_MASK	BIT_ULL(33)
271 #define PFERR_GUEST_ENC_MASK	BIT_ULL(34)
272 #define PFERR_GUEST_SIZEM_MASK	BIT_ULL(35)
273 #define PFERR_GUEST_VMPL_MASK	BIT_ULL(36)
274 
275 /*
276  * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
277  * when emulating instructions that triggers implicit access.
278  */
279 #define PFERR_IMPLICIT_ACCESS	BIT_ULL(48)
280 /*
281  * PRIVATE_ACCESS is a KVM-defined flag us to indicate that a fault occurred
282  * when the guest was accessing private memory.
283  */
284 #define PFERR_PRIVATE_ACCESS   BIT_ULL(49)
285 #define PFERR_SYNTHETIC_MASK   (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
286 
287 /* apic attention bits */
288 #define KVM_APIC_CHECK_VAPIC	0
289 /*
290  * The following bit is set with PV-EOI, unset on EOI.
291  * We detect PV-EOI changes by guest by comparing
292  * this bit with PV-EOI in guest memory.
293  * See the implementation in apic_update_pv_eoi.
294  */
295 #define KVM_APIC_PV_EOI_PENDING	1
296 
297 struct kvm_kernel_irq_routing_entry;
298 
299 /*
300  * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
301  * also includes TDP pages) to determine whether or not a page can be used in
302  * the given MMU context.  This is a subset of the overall kvm_cpu_role to
303  * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
304  * allocating 2 bytes per gfn instead of 4 bytes per gfn.
305  *
306  * Upper-level shadow pages having gptes are tracked for write-protection via
307  * gfn_write_track.  As above, gfn_write_track is a 16 bit counter, so KVM must
308  * not create more than 2^16-1 upper-level shadow pages at a single gfn,
309  * otherwise gfn_write_track will overflow and explosions will ensue.
310  *
311  * A unique shadow page (SP) for a gfn is created if and only if an existing SP
312  * cannot be reused.  The ability to reuse a SP is tracked by its role, which
313  * incorporates various mode bits and properties of the SP.  Roughly speaking,
314  * the number of unique SPs that can theoretically be created is 2^n, where n
315  * is the number of bits that are used to compute the role.
316  *
317  * But, even though there are 20 bits in the mask below, not all combinations
318  * of modes and flags are possible:
319  *
320  *   - invalid shadow pages are not accounted, mirror pages are not shadowed,
321  *     so the bits are effectively 18.
322  *
323  *   - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
324  *     execonly and ad_disabled are only used for nested EPT which has
325  *     has_4_byte_gpte=0.  Therefore, 2 bits are always unused.
326  *
327  *   - the 4 bits of level are effectively limited to the values 2/3/4/5,
328  *     as 4k SPs are not tracked (allowed to go unsync).  In addition non-PAE
329  *     paging has exactly one upper level, making level completely redundant
330  *     when has_4_byte_gpte=1.
331  *
332  *   - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
333  *     cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
334  *
335  * Therefore, the maximum number of possible upper-level shadow pages for a
336  * single gfn is a bit less than 2^13.
337  */
338 union kvm_mmu_page_role {
339 	u32 word;
340 	struct {
341 		unsigned level:4;
342 		unsigned has_4_byte_gpte:1;
343 		unsigned quadrant:2;
344 		unsigned direct:1;
345 		unsigned access:3;
346 		unsigned invalid:1;
347 		unsigned efer_nx:1;
348 		unsigned cr0_wp:1;
349 		unsigned smep_andnot_wp:1;
350 		unsigned smap_andnot_wp:1;
351 		unsigned ad_disabled:1;
352 		unsigned guest_mode:1;
353 		unsigned passthrough:1;
354 		unsigned is_mirror:1;
355 		unsigned :4;
356 
357 		/*
358 		 * This is left at the top of the word so that
359 		 * kvm_memslots_for_spte_role can extract it with a
360 		 * simple shift.  While there is room, give it a whole
361 		 * byte so it is also faster to load it from memory.
362 		 */
363 		unsigned smm:8;
364 	};
365 };
366 
367 /*
368  * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
369  * relevant to the current MMU configuration.   When loading CR0, CR4, or EFER,
370  * including on nested transitions, if nothing in the full role changes then
371  * MMU re-configuration can be skipped. @valid bit is set on first usage so we
372  * don't treat all-zero structure as valid data.
373  *
374  * The properties that are tracked in the extended role but not the page role
375  * are for things that either (a) do not affect the validity of the shadow page
376  * or (b) are indirectly reflected in the shadow page's role.  For example,
377  * CR4.PKE only affects permission checks for software walks of the guest page
378  * tables (because KVM doesn't support Protection Keys with shadow paging), and
379  * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
380  *
381  * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
382  * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
383  * SMAP, but the MMU's permission checks for software walks need to be SMEP and
384  * SMAP aware regardless of CR0.WP.
385  */
386 union kvm_mmu_extended_role {
387 	u32 word;
388 	struct {
389 		unsigned int valid:1;
390 		unsigned int execonly:1;
391 		unsigned int cr4_pse:1;
392 		unsigned int cr4_pke:1;
393 		unsigned int cr4_smap:1;
394 		unsigned int cr4_smep:1;
395 		unsigned int cr4_la57:1;
396 		unsigned int efer_lma:1;
397 	};
398 };
399 
400 union kvm_cpu_role {
401 	u64 as_u64;
402 	struct {
403 		union kvm_mmu_page_role base;
404 		union kvm_mmu_extended_role ext;
405 	};
406 };
407 
408 struct kvm_rmap_head {
409 	atomic_long_t val;
410 };
411 
412 struct kvm_pio_request {
413 	unsigned long linear_rip;
414 	unsigned long count;
415 	int in;
416 	int port;
417 	int size;
418 };
419 
420 #define PT64_ROOT_MAX_LEVEL 5
421 
422 struct rsvd_bits_validate {
423 	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
424 	u64 bad_mt_xwr;
425 };
426 
427 struct kvm_mmu_root_info {
428 	gpa_t pgd;
429 	hpa_t hpa;
430 };
431 
432 #define KVM_MMU_ROOT_INFO_INVALID \
433 	((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
434 
435 #define KVM_MMU_NUM_PREV_ROOTS 3
436 
437 #define KVM_MMU_ROOT_CURRENT		BIT(0)
438 #define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
439 #define KVM_MMU_ROOTS_ALL		(BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
440 
441 #define KVM_HAVE_MMU_RWLOCK
442 
443 struct kvm_mmu_page;
444 struct kvm_page_fault;
445 
446 /*
447  * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
448  * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
449  * current mmu mode.
450  */
451 struct kvm_mmu {
452 	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
453 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
454 	int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
455 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
456 				  struct x86_exception *fault);
457 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
458 			    gpa_t gva_or_gpa, u64 access,
459 			    struct x86_exception *exception);
460 	int (*sync_spte)(struct kvm_vcpu *vcpu,
461 			 struct kvm_mmu_page *sp, int i);
462 	struct kvm_mmu_root_info root;
463 	hpa_t mirror_root_hpa;
464 	union kvm_cpu_role cpu_role;
465 	union kvm_mmu_page_role root_role;
466 
467 	/*
468 	* The pkru_mask indicates if protection key checks are needed.  It
469 	* consists of 16 domains indexed by page fault error code bits [4:1],
470 	* with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
471 	* Each domain has 2 bits which are ANDed with AD and WD from PKRU.
472 	*/
473 	u32 pkru_mask;
474 
475 	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
476 
477 	/*
478 	 * Bitmap; bit set = permission fault
479 	 * Byte index: page fault error code [4:1]
480 	 * Bit index: pte permissions in ACC_* format
481 	 */
482 	u8 permissions[16];
483 
484 	u64 *pae_root;
485 	u64 *pml4_root;
486 	u64 *pml5_root;
487 
488 	/*
489 	 * check zero bits on shadow page table entries, these
490 	 * bits include not only hardware reserved bits but also
491 	 * the bits spte never used.
492 	 */
493 	struct rsvd_bits_validate shadow_zero_check;
494 
495 	struct rsvd_bits_validate guest_rsvd_check;
496 
497 	u64 pdptrs[4]; /* pae */
498 };
499 
500 enum pmc_type {
501 	KVM_PMC_GP = 0,
502 	KVM_PMC_FIXED,
503 };
504 
505 struct kvm_pmc {
506 	enum pmc_type type;
507 	u8 idx;
508 	bool is_paused;
509 	bool intr;
510 	/*
511 	 * Base value of the PMC counter, relative to the *consumed* count in
512 	 * the associated perf_event.  This value includes counter updates from
513 	 * the perf_event and emulated_count since the last time the counter
514 	 * was reprogrammed, but it is *not* the current value as seen by the
515 	 * guest or userspace.
516 	 *
517 	 * The count is relative to the associated perf_event so that KVM
518 	 * doesn't need to reprogram the perf_event every time the guest writes
519 	 * to the counter.
520 	 */
521 	u64 counter;
522 	/*
523 	 * PMC events triggered by KVM emulation that haven't been fully
524 	 * processed, i.e. haven't undergone overflow detection.
525 	 */
526 	u64 emulated_counter;
527 	u64 eventsel;
528 	struct perf_event *perf_event;
529 	struct kvm_vcpu *vcpu;
530 	/*
531 	 * only for creating or reusing perf_event,
532 	 * eventsel value for general purpose counters,
533 	 * ctrl value for fixed counters.
534 	 */
535 	u64 current_config;
536 };
537 
538 /* More counters may conflict with other existing Architectural MSRs */
539 #define KVM_MAX(a, b)	((a) >= (b) ? (a) : (b))
540 #define KVM_MAX_NR_INTEL_GP_COUNTERS	8
541 #define KVM_MAX_NR_AMD_GP_COUNTERS	6
542 #define KVM_MAX_NR_GP_COUNTERS		KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \
543 						KVM_MAX_NR_AMD_GP_COUNTERS)
544 
545 #define KVM_MAX_NR_INTEL_FIXED_COUTNERS	3
546 #define KVM_MAX_NR_AMD_FIXED_COUTNERS	0
547 #define KVM_MAX_NR_FIXED_COUNTERS	KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUTNERS, \
548 						KVM_MAX_NR_AMD_FIXED_COUTNERS)
549 
550 struct kvm_pmu {
551 	u8 version;
552 	unsigned nr_arch_gp_counters;
553 	unsigned nr_arch_fixed_counters;
554 	unsigned available_event_types;
555 	u64 fixed_ctr_ctrl;
556 	u64 fixed_ctr_ctrl_rsvd;
557 	u64 global_ctrl;
558 	u64 global_status;
559 	u64 counter_bitmask[2];
560 	u64 global_ctrl_rsvd;
561 	u64 global_status_rsvd;
562 	u64 reserved_bits;
563 	u64 raw_event_mask;
564 	struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS];
565 	struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUNTERS];
566 
567 	/*
568 	 * Overlay the bitmap with a 64-bit atomic so that all bits can be
569 	 * set in a single access, e.g. to reprogram all counters when the PMU
570 	 * filter changes.
571 	 */
572 	union {
573 		DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
574 		atomic64_t __reprogram_pmi;
575 	};
576 	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
577 	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
578 
579 	u64 ds_area;
580 	u64 pebs_enable;
581 	u64 pebs_enable_rsvd;
582 	u64 pebs_data_cfg;
583 	u64 pebs_data_cfg_rsvd;
584 
585 	/*
586 	 * If a guest counter is cross-mapped to host counter with different
587 	 * index, its PEBS capability will be temporarily disabled.
588 	 *
589 	 * The user should make sure that this mask is updated
590 	 * after disabling interrupts and before perf_guest_get_msrs();
591 	 */
592 	u64 host_cross_mapped_mask;
593 
594 	/*
595 	 * The gate to release perf_events not marked in
596 	 * pmc_in_use only once in a vcpu time slice.
597 	 */
598 	bool need_cleanup;
599 
600 	/*
601 	 * The total number of programmed perf_events and it helps to avoid
602 	 * redundant check before cleanup if guest don't use vPMU at all.
603 	 */
604 	u8 event_count;
605 };
606 
607 struct kvm_pmu_ops;
608 
609 enum {
610 	KVM_DEBUGREG_BP_ENABLED = 1,
611 	KVM_DEBUGREG_WONT_EXIT = 2,
612 };
613 
614 struct kvm_mtrr {
615 	u64 var[KVM_NR_VAR_MTRR * 2];
616 	u64 fixed_64k;
617 	u64 fixed_16k[2];
618 	u64 fixed_4k[8];
619 	u64 deftype;
620 };
621 
622 /* Hyper-V SynIC timer */
623 struct kvm_vcpu_hv_stimer {
624 	struct hrtimer timer;
625 	int index;
626 	union hv_stimer_config config;
627 	u64 count;
628 	u64 exp_time;
629 	struct hv_message msg;
630 	bool msg_pending;
631 };
632 
633 /* Hyper-V synthetic interrupt controller (SynIC)*/
634 struct kvm_vcpu_hv_synic {
635 	u64 version;
636 	u64 control;
637 	u64 msg_page;
638 	u64 evt_page;
639 	atomic64_t sint[HV_SYNIC_SINT_COUNT];
640 	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
641 	DECLARE_BITMAP(auto_eoi_bitmap, 256);
642 	DECLARE_BITMAP(vec_bitmap, 256);
643 	bool active;
644 	bool dont_zero_synic_pages;
645 };
646 
647 /* The maximum number of entries on the TLB flush fifo. */
648 #define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
649 /*
650  * Note: the following 'magic' entry is made up by KVM to avoid putting
651  * anything besides GVA on the TLB flush fifo. It is theoretically possible
652  * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
653  * which will look identical. KVM's action to 'flush everything' instead of
654  * flushing these particular addresses is, however, fully legitimate as
655  * flushing more than requested is always OK.
656  */
657 #define KVM_HV_TLB_FLUSHALL_ENTRY  ((u64)-1)
658 
659 enum hv_tlb_flush_fifos {
660 	HV_L1_TLB_FLUSH_FIFO,
661 	HV_L2_TLB_FLUSH_FIFO,
662 	HV_NR_TLB_FLUSH_FIFOS,
663 };
664 
665 struct kvm_vcpu_hv_tlb_flush_fifo {
666 	spinlock_t write_lock;
667 	DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
668 };
669 
670 /* Hyper-V per vcpu emulation context */
671 struct kvm_vcpu_hv {
672 	struct kvm_vcpu *vcpu;
673 	u32 vp_index;
674 	u64 hv_vapic;
675 	s64 runtime_offset;
676 	struct kvm_vcpu_hv_synic synic;
677 	struct kvm_hyperv_exit exit;
678 	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
679 	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
680 	bool enforce_cpuid;
681 	struct {
682 		u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
683 		u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
684 		u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
685 		u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
686 		u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
687 		u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
688 		u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
689 		u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
690 	} cpuid_cache;
691 
692 	struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];
693 
694 	/* Preallocated buffer for handling hypercalls passing sparse vCPU set */
695 	u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
696 
697 	struct hv_vp_assist_page vp_assist_page;
698 
699 	struct {
700 		u64 pa_page_gpa;
701 		u64 vm_id;
702 		u32 vp_id;
703 	} nested;
704 };
705 
706 struct kvm_hypervisor_cpuid {
707 	u32 base;
708 	u32 limit;
709 };
710 
711 #ifdef CONFIG_KVM_XEN
712 /* Xen HVM per vcpu emulation context */
713 struct kvm_vcpu_xen {
714 	u64 hypercall_rip;
715 	u32 current_runstate;
716 	u8 upcall_vector;
717 	struct gfn_to_pfn_cache vcpu_info_cache;
718 	struct gfn_to_pfn_cache vcpu_time_info_cache;
719 	struct gfn_to_pfn_cache runstate_cache;
720 	struct gfn_to_pfn_cache runstate2_cache;
721 	u64 last_steal;
722 	u64 runstate_entry_time;
723 	u64 runstate_times[4];
724 	unsigned long evtchn_pending_sel;
725 	u32 vcpu_id; /* The Xen / ACPI vCPU ID */
726 	u32 timer_virq;
727 	u64 timer_expires; /* In guest epoch */
728 	atomic_t timer_pending;
729 	struct hrtimer timer;
730 	int poll_evtchn;
731 	struct timer_list poll_timer;
732 	struct kvm_hypervisor_cpuid cpuid;
733 };
734 #endif
735 
736 struct kvm_queued_exception {
737 	bool pending;
738 	bool injected;
739 	bool has_error_code;
740 	u8 vector;
741 	u32 error_code;
742 	unsigned long payload;
743 	bool has_payload;
744 };
745 
746 /*
747  * Hardware-defined CPUID leafs that are either scattered by the kernel or are
748  * unknown to the kernel, but need to be directly used by KVM.  Note, these
749  * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
750  */
751 enum kvm_only_cpuid_leafs {
752 	CPUID_12_EAX	 = NCAPINTS,
753 	CPUID_7_1_EDX,
754 	CPUID_8000_0007_EDX,
755 	CPUID_8000_0022_EAX,
756 	CPUID_7_2_EDX,
757 	CPUID_24_0_EBX,
758 	NR_KVM_CPU_CAPS,
759 
760 	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
761 };
762 
763 struct kvm_vcpu_arch {
764 	/*
765 	 * rip and regs accesses must go through
766 	 * kvm_{register,rip}_{read,write} functions.
767 	 */
768 	unsigned long regs[NR_VCPU_REGS];
769 	u32 regs_avail;
770 	u32 regs_dirty;
771 
772 	unsigned long cr0;
773 	unsigned long cr0_guest_owned_bits;
774 	unsigned long cr2;
775 	unsigned long cr3;
776 	unsigned long cr4;
777 	unsigned long cr4_guest_owned_bits;
778 	unsigned long cr4_guest_rsvd_bits;
779 	unsigned long cr8;
780 	u32 host_pkru;
781 	u32 pkru;
782 	u32 hflags;
783 	u64 efer;
784 	u64 host_debugctl;
785 	u64 apic_base;
786 	struct kvm_lapic *apic;    /* kernel irqchip context */
787 	bool load_eoi_exitmap_pending;
788 	DECLARE_BITMAP(ioapic_handled_vectors, 256);
789 	unsigned long apic_attention;
790 	int32_t apic_arb_prio;
791 	int mp_state;
792 	u64 ia32_misc_enable_msr;
793 	u64 smbase;
794 	u64 smi_count;
795 	bool at_instruction_boundary;
796 	bool tpr_access_reporting;
797 	bool xfd_no_write_intercept;
798 	u64 ia32_xss;
799 	u64 microcode_version;
800 	u64 arch_capabilities;
801 	u64 perf_capabilities;
802 
803 	/*
804 	 * Paging state of the vcpu
805 	 *
806 	 * If the vcpu runs in guest mode with two level paging this still saves
807 	 * the paging mode of the l1 guest. This context is always used to
808 	 * handle faults.
809 	 */
810 	struct kvm_mmu *mmu;
811 
812 	/* Non-nested MMU for L1 */
813 	struct kvm_mmu root_mmu;
814 
815 	/* L1 MMU when running nested */
816 	struct kvm_mmu guest_mmu;
817 
818 	/*
819 	 * Paging state of an L2 guest (used for nested npt)
820 	 *
821 	 * This context will save all necessary information to walk page tables
822 	 * of an L2 guest. This context is only initialized for page table
823 	 * walking and not for faulting since we never handle l2 page faults on
824 	 * the host.
825 	 */
826 	struct kvm_mmu nested_mmu;
827 
828 	/*
829 	 * Pointer to the mmu context currently used for
830 	 * gva_to_gpa translations.
831 	 */
832 	struct kvm_mmu *walk_mmu;
833 
834 	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
835 	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
836 	struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
837 	struct kvm_mmu_memory_cache mmu_page_header_cache;
838 	/*
839 	 * This cache is to allocate external page table. E.g. private EPT used
840 	 * by the TDX module.
841 	 */
842 	struct kvm_mmu_memory_cache mmu_external_spt_cache;
843 
844 	/*
845 	 * QEMU userspace and the guest each have their own FPU state.
846 	 * In vcpu_run, we switch between the user and guest FPU contexts.
847 	 * While running a VCPU, the VCPU thread will have the guest FPU
848 	 * context.
849 	 *
850 	 * Note that while the PKRU state lives inside the fpu registers,
851 	 * it is switched out separately at VMENTER and VMEXIT time. The
852 	 * "guest_fpstate" state here contains the guest FPU context, with the
853 	 * host PRKU bits.
854 	 */
855 	struct fpu_guest guest_fpu;
856 
857 	u64 xcr0;
858 	u64 guest_supported_xcr0;
859 
860 	struct kvm_pio_request pio;
861 	void *pio_data;
862 	void *sev_pio_data;
863 	unsigned sev_pio_count;
864 
865 	u8 event_exit_inst_len;
866 
867 	bool exception_from_userspace;
868 
869 	/* Exceptions to be injected to the guest. */
870 	struct kvm_queued_exception exception;
871 	/* Exception VM-Exits to be synthesized to L1. */
872 	struct kvm_queued_exception exception_vmexit;
873 
874 	struct kvm_queued_interrupt {
875 		bool injected;
876 		bool soft;
877 		u8 nr;
878 	} interrupt;
879 
880 	int halt_request; /* real mode on Intel only */
881 
882 	int cpuid_nent;
883 	struct kvm_cpuid_entry2 *cpuid_entries;
884 	bool cpuid_dynamic_bits_dirty;
885 	bool is_amd_compatible;
886 
887 	/*
888 	 * cpu_caps holds the effective guest capabilities, i.e. the features
889 	 * the vCPU is allowed to use.  Typically, but not always, features can
890 	 * be used by the guest if and only if both KVM and userspace want to
891 	 * expose the feature to the guest.
892 	 *
893 	 * A common exception is for virtualization holes, i.e. when KVM can't
894 	 * prevent the guest from using a feature, in which case the vCPU "has"
895 	 * the feature regardless of what KVM or userspace desires.
896 	 *
897 	 * Note, features that don't require KVM involvement in any way are
898 	 * NOT enforced/sanitized by KVM, i.e. are taken verbatim from the
899 	 * guest CPUID provided by userspace.
900 	 */
901 	u32 cpu_caps[NR_KVM_CPU_CAPS];
902 
903 	u64 reserved_gpa_bits;
904 	int maxphyaddr;
905 
906 	/* emulate context */
907 
908 	struct x86_emulate_ctxt *emulate_ctxt;
909 	bool emulate_regs_need_sync_to_vcpu;
910 	bool emulate_regs_need_sync_from_vcpu;
911 	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
912 
913 	gpa_t time;
914 	s8  pvclock_tsc_shift;
915 	u32 pvclock_tsc_mul;
916 	unsigned int hw_tsc_khz;
917 	struct gfn_to_pfn_cache pv_time;
918 	/* set guest stopped flag in pvclock flags field */
919 	bool pvclock_set_guest_stopped_request;
920 
921 	struct {
922 		u8 preempted;
923 		u64 msr_val;
924 		u64 last_steal;
925 		struct gfn_to_hva_cache cache;
926 	} st;
927 
928 	u64 l1_tsc_offset;
929 	u64 tsc_offset; /* current tsc offset */
930 	u64 last_guest_tsc;
931 	u64 last_host_tsc;
932 	u64 tsc_offset_adjustment;
933 	u64 this_tsc_nsec;
934 	u64 this_tsc_write;
935 	u64 this_tsc_generation;
936 	bool tsc_catchup;
937 	bool tsc_always_catchup;
938 	s8 virtual_tsc_shift;
939 	u32 virtual_tsc_mult;
940 	u32 virtual_tsc_khz;
941 	s64 ia32_tsc_adjust_msr;
942 	u64 msr_ia32_power_ctl;
943 	u64 l1_tsc_scaling_ratio;
944 	u64 tsc_scaling_ratio; /* current scaling ratio */
945 
946 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
947 	/* Number of NMIs pending injection, not including hardware vNMIs. */
948 	unsigned int nmi_pending;
949 	bool nmi_injected;    /* Trying to inject an NMI this entry */
950 	bool smi_pending;    /* SMI queued after currently running handler */
951 	u8 handling_intr_from_guest;
952 
953 	struct kvm_mtrr mtrr_state;
954 	u64 pat;
955 
956 	unsigned switch_db_regs;
957 	unsigned long db[KVM_NR_DB_REGS];
958 	unsigned long dr6;
959 	unsigned long dr7;
960 	unsigned long eff_db[KVM_NR_DB_REGS];
961 	unsigned long guest_debug_dr7;
962 	u64 msr_platform_info;
963 	u64 msr_misc_features_enables;
964 
965 	u64 mcg_cap;
966 	u64 mcg_status;
967 	u64 mcg_ctl;
968 	u64 mcg_ext_ctl;
969 	u64 *mce_banks;
970 	u64 *mci_ctl2_banks;
971 
972 	/* Cache MMIO info */
973 	u64 mmio_gva;
974 	unsigned mmio_access;
975 	gfn_t mmio_gfn;
976 	u64 mmio_gen;
977 
978 	struct kvm_pmu pmu;
979 
980 	/* used for guest single stepping over the given code position */
981 	unsigned long singlestep_rip;
982 
983 #ifdef CONFIG_KVM_HYPERV
984 	bool hyperv_enabled;
985 	struct kvm_vcpu_hv *hyperv;
986 #endif
987 #ifdef CONFIG_KVM_XEN
988 	struct kvm_vcpu_xen xen;
989 #endif
990 	cpumask_var_t wbinvd_dirty_mask;
991 
992 	unsigned long last_retry_eip;
993 	unsigned long last_retry_addr;
994 
995 	struct {
996 		bool halted;
997 		gfn_t gfns[ASYNC_PF_PER_VCPU];
998 		struct gfn_to_hva_cache data;
999 		u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
1000 		u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
1001 		u16 vec;
1002 		u32 id;
1003 		u32 host_apf_flags;
1004 		bool send_always;
1005 		bool delivery_as_pf_vmexit;
1006 		bool pageready_pending;
1007 	} apf;
1008 
1009 	/* OSVW MSRs (AMD only) */
1010 	struct {
1011 		u64 length;
1012 		u64 status;
1013 	} osvw;
1014 
1015 	struct {
1016 		u64 msr_val;
1017 		struct gfn_to_hva_cache data;
1018 	} pv_eoi;
1019 
1020 	u64 msr_kvm_poll_control;
1021 
1022 	/* pv related host specific info */
1023 	struct {
1024 		bool pv_unhalted;
1025 	} pv;
1026 
1027 	int pending_ioapic_eoi;
1028 	int pending_external_vector;
1029 
1030 	/* be preempted when it's in kernel-mode(cpl=0) */
1031 	bool preempted_in_kernel;
1032 
1033 	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
1034 	bool l1tf_flush_l1d;
1035 
1036 	/* Host CPU on which VM-entry was most recently attempted */
1037 	int last_vmentry_cpu;
1038 
1039 	/* AMD MSRC001_0015 Hardware Configuration */
1040 	u64 msr_hwcr;
1041 
1042 	/* pv related cpuid info */
1043 	struct {
1044 		/*
1045 		 * value of the eax register in the KVM_CPUID_FEATURES CPUID
1046 		 * leaf.
1047 		 */
1048 		u32 features;
1049 
1050 		/*
1051 		 * indicates whether pv emulation should be disabled if features
1052 		 * are not present in the guest's cpuid
1053 		 */
1054 		bool enforce;
1055 	} pv_cpuid;
1056 
1057 	/* Protected Guests */
1058 	bool guest_state_protected;
1059 	bool guest_tsc_protected;
1060 
1061 	/*
1062 	 * Set when PDPTS were loaded directly by the userspace without
1063 	 * reading the guest memory
1064 	 */
1065 	bool pdptrs_from_userspace;
1066 
1067 #if IS_ENABLED(CONFIG_HYPERV)
1068 	hpa_t hv_root_tdp;
1069 #endif
1070 };
1071 
1072 struct kvm_lpage_info {
1073 	int disallow_lpage;
1074 };
1075 
1076 struct kvm_arch_memory_slot {
1077 	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
1078 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
1079 	unsigned short *gfn_write_track;
1080 };
1081 
1082 /*
1083  * Track the mode of the optimized logical map, as the rules for decoding the
1084  * destination vary per mode.  Enabling the optimized logical map requires all
1085  * software-enabled local APIs to be in the same mode, each addressable APIC to
1086  * be mapped to only one MDA, and each MDA to map to at most one APIC.
1087  */
1088 enum kvm_apic_logical_mode {
1089 	/* All local APICs are software disabled. */
1090 	KVM_APIC_MODE_SW_DISABLED,
1091 	/* All software enabled local APICs in xAPIC cluster addressing mode. */
1092 	KVM_APIC_MODE_XAPIC_CLUSTER,
1093 	/* All software enabled local APICs in xAPIC flat addressing mode. */
1094 	KVM_APIC_MODE_XAPIC_FLAT,
1095 	/* All software enabled local APICs in x2APIC mode. */
1096 	KVM_APIC_MODE_X2APIC,
1097 	/*
1098 	 * Optimized map disabled, e.g. not all local APICs in the same logical
1099 	 * mode, same logical ID assigned to multiple APICs, etc.
1100 	 */
1101 	KVM_APIC_MODE_MAP_DISABLED,
1102 };
1103 
1104 struct kvm_apic_map {
1105 	struct rcu_head rcu;
1106 	enum kvm_apic_logical_mode logical_mode;
1107 	u32 max_apic_id;
1108 	union {
1109 		struct kvm_lapic *xapic_flat_map[8];
1110 		struct kvm_lapic *xapic_cluster_map[16][4];
1111 	};
1112 	struct kvm_lapic *phys_map[];
1113 };
1114 
1115 /* Hyper-V synthetic debugger (SynDbg)*/
1116 struct kvm_hv_syndbg {
1117 	struct {
1118 		u64 control;
1119 		u64 status;
1120 		u64 send_page;
1121 		u64 recv_page;
1122 		u64 pending_page;
1123 	} control;
1124 	u64 options;
1125 };
1126 
1127 /* Current state of Hyper-V TSC page clocksource */
1128 enum hv_tsc_page_status {
1129 	/* TSC page was not set up or disabled */
1130 	HV_TSC_PAGE_UNSET = 0,
1131 	/* TSC page MSR was written by the guest, update pending */
1132 	HV_TSC_PAGE_GUEST_CHANGED,
1133 	/* TSC page update was triggered from the host side */
1134 	HV_TSC_PAGE_HOST_CHANGED,
1135 	/* TSC page was properly set up and is currently active  */
1136 	HV_TSC_PAGE_SET,
1137 	/* TSC page was set up with an inaccessible GPA */
1138 	HV_TSC_PAGE_BROKEN,
1139 };
1140 
1141 #ifdef CONFIG_KVM_HYPERV
1142 /* Hyper-V emulation context */
1143 struct kvm_hv {
1144 	struct mutex hv_lock;
1145 	u64 hv_guest_os_id;
1146 	u64 hv_hypercall;
1147 	u64 hv_tsc_page;
1148 	enum hv_tsc_page_status hv_tsc_page_status;
1149 
1150 	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
1151 	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
1152 	u64 hv_crash_ctl;
1153 
1154 	struct ms_hyperv_tsc_page tsc_ref;
1155 
1156 	struct idr conn_to_evt;
1157 
1158 	u64 hv_reenlightenment_control;
1159 	u64 hv_tsc_emulation_control;
1160 	u64 hv_tsc_emulation_status;
1161 	u64 hv_invtsc_control;
1162 
1163 	/* How many vCPUs have VP index != vCPU index */
1164 	atomic_t num_mismatched_vp_indexes;
1165 
1166 	/*
1167 	 * How many SynICs use 'AutoEOI' feature
1168 	 * (protected by arch.apicv_update_lock)
1169 	 */
1170 	unsigned int synic_auto_eoi_used;
1171 
1172 	struct kvm_hv_syndbg hv_syndbg;
1173 
1174 	bool xsaves_xsavec_checked;
1175 };
1176 #endif
1177 
1178 struct msr_bitmap_range {
1179 	u32 flags;
1180 	u32 nmsrs;
1181 	u32 base;
1182 	unsigned long *bitmap;
1183 };
1184 
1185 #ifdef CONFIG_KVM_XEN
1186 /* Xen emulation context */
1187 struct kvm_xen {
1188 	struct mutex xen_lock;
1189 	u32 xen_version;
1190 	bool long_mode;
1191 	bool runstate_update_flag;
1192 	u8 upcall_vector;
1193 	struct gfn_to_pfn_cache shinfo_cache;
1194 	struct idr evtchn_ports;
1195 	unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
1196 
1197 	struct kvm_xen_hvm_config hvm_config;
1198 };
1199 #endif
1200 
1201 enum kvm_irqchip_mode {
1202 	KVM_IRQCHIP_NONE,
1203 	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
1204 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
1205 };
1206 
1207 struct kvm_x86_msr_filter {
1208 	u8 count;
1209 	bool default_allow:1;
1210 	struct msr_bitmap_range ranges[16];
1211 };
1212 
1213 struct kvm_x86_pmu_event_filter {
1214 	__u32 action;
1215 	__u32 nevents;
1216 	__u32 fixed_counter_bitmap;
1217 	__u32 flags;
1218 	__u32 nr_includes;
1219 	__u32 nr_excludes;
1220 	__u64 *includes;
1221 	__u64 *excludes;
1222 	__u64 events[];
1223 };
1224 
1225 enum kvm_apicv_inhibit {
1226 
1227 	/********************************************************************/
1228 	/* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
1229 	/********************************************************************/
1230 
1231 	/*
1232 	 * APIC acceleration is disabled by a module parameter
1233 	 * and/or not supported in hardware.
1234 	 */
1235 	APICV_INHIBIT_REASON_DISABLED,
1236 
1237 	/*
1238 	 * APIC acceleration is inhibited because AutoEOI feature is
1239 	 * being used by a HyperV guest.
1240 	 */
1241 	APICV_INHIBIT_REASON_HYPERV,
1242 
1243 	/*
1244 	 * APIC acceleration is inhibited because the userspace didn't yet
1245 	 * enable the kernel/split irqchip.
1246 	 */
1247 	APICV_INHIBIT_REASON_ABSENT,
1248 
1249 	/* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
1250 	 * (out of band, debug measure of blocking all interrupts on this vCPU)
1251 	 * was enabled, to avoid AVIC/APICv bypassing it.
1252 	 */
1253 	APICV_INHIBIT_REASON_BLOCKIRQ,
1254 
1255 	/*
1256 	 * APICv is disabled because not all vCPUs have a 1:1 mapping between
1257 	 * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack.
1258 	 */
1259 	APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED,
1260 
1261 	/*
1262 	 * For simplicity, the APIC acceleration is inhibited
1263 	 * first time either APIC ID or APIC base are changed by the guest
1264 	 * from their reset values.
1265 	 */
1266 	APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
1267 	APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,
1268 
1269 	/******************************************************/
1270 	/* INHIBITs that are relevant only to the AMD's AVIC. */
1271 	/******************************************************/
1272 
1273 	/*
1274 	 * AVIC is inhibited on a vCPU because it runs a nested guest.
1275 	 *
1276 	 * This is needed because unlike APICv, the peers of this vCPU
1277 	 * cannot use the doorbell mechanism to signal interrupts via AVIC when
1278 	 * a vCPU runs nested.
1279 	 */
1280 	APICV_INHIBIT_REASON_NESTED,
1281 
1282 	/*
1283 	 * On SVM, the wait for the IRQ window is implemented with pending vIRQ,
1284 	 * which cannot be injected when the AVIC is enabled, thus AVIC
1285 	 * is inhibited while KVM waits for IRQ window.
1286 	 */
1287 	APICV_INHIBIT_REASON_IRQWIN,
1288 
1289 	/*
1290 	 * PIT (i8254) 're-inject' mode, relies on EOI intercept,
1291 	 * which AVIC doesn't support for edge triggered interrupts.
1292 	 */
1293 	APICV_INHIBIT_REASON_PIT_REINJ,
1294 
1295 	/*
1296 	 * AVIC is disabled because SEV doesn't support it.
1297 	 */
1298 	APICV_INHIBIT_REASON_SEV,
1299 
1300 	/*
1301 	 * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1
1302 	 * mapping between logical ID and vCPU.
1303 	 */
1304 	APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,
1305 
1306 	NR_APICV_INHIBIT_REASONS,
1307 };
1308 
1309 #define __APICV_INHIBIT_REASON(reason)			\
1310 	{ BIT(APICV_INHIBIT_REASON_##reason), #reason }
1311 
1312 #define APICV_INHIBIT_REASONS				\
1313 	__APICV_INHIBIT_REASON(DISABLED),		\
1314 	__APICV_INHIBIT_REASON(HYPERV),			\
1315 	__APICV_INHIBIT_REASON(ABSENT),			\
1316 	__APICV_INHIBIT_REASON(BLOCKIRQ),		\
1317 	__APICV_INHIBIT_REASON(PHYSICAL_ID_ALIASED),	\
1318 	__APICV_INHIBIT_REASON(APIC_ID_MODIFIED),	\
1319 	__APICV_INHIBIT_REASON(APIC_BASE_MODIFIED),	\
1320 	__APICV_INHIBIT_REASON(NESTED),			\
1321 	__APICV_INHIBIT_REASON(IRQWIN),			\
1322 	__APICV_INHIBIT_REASON(PIT_REINJ),		\
1323 	__APICV_INHIBIT_REASON(SEV),			\
1324 	__APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED)
1325 
1326 struct kvm_arch {
1327 	unsigned long n_used_mmu_pages;
1328 	unsigned long n_requested_mmu_pages;
1329 	unsigned long n_max_mmu_pages;
1330 	unsigned int indirect_shadow_pages;
1331 	u8 mmu_valid_gen;
1332 	u8 vm_type;
1333 	bool has_private_mem;
1334 	bool has_protected_state;
1335 	bool pre_fault_allowed;
1336 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
1337 	struct list_head active_mmu_pages;
1338 	/*
1339 	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
1340 	 * replaced by an NX huge page.  A shadow page is on this list if its
1341 	 * existence disallows an NX huge page (nx_huge_page_disallowed is set)
1342 	 * and there are no other conditions that prevent a huge page, e.g.
1343 	 * the backing host page is huge, dirtly logging is not enabled for its
1344 	 * memslot, etc...  Note, zapping shadow pages on this list doesn't
1345 	 * guarantee an NX huge page will be created in its stead, e.g. if the
1346 	 * guest attempts to execute from the region then KVM obviously can't
1347 	 * create an NX huge page (without hanging the guest).
1348 	 */
1349 	struct list_head possible_nx_huge_pages;
1350 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1351 	struct kvm_page_track_notifier_head track_notifier_head;
1352 #endif
1353 	/*
1354 	 * Protects marking pages unsync during page faults, as TDP MMU page
1355 	 * faults only take mmu_lock for read.  For simplicity, the unsync
1356 	 * pages lock is always taken when marking pages unsync regardless of
1357 	 * whether mmu_lock is held for read or write.
1358 	 */
1359 	spinlock_t mmu_unsync_pages_lock;
1360 
1361 	u64 shadow_mmio_value;
1362 
1363 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
1364 	atomic_t noncoherent_dma_count;
1365 #define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
1366 	atomic_t assigned_device_count;
1367 	struct kvm_pic *vpic;
1368 	struct kvm_ioapic *vioapic;
1369 	struct kvm_pit *vpit;
1370 	atomic_t vapics_in_nmi_mode;
1371 	struct mutex apic_map_lock;
1372 	struct kvm_apic_map __rcu *apic_map;
1373 	atomic_t apic_map_dirty;
1374 
1375 	bool apic_access_memslot_enabled;
1376 	bool apic_access_memslot_inhibited;
1377 
1378 	/* Protects apicv_inhibit_reasons */
1379 	struct rw_semaphore apicv_update_lock;
1380 	unsigned long apicv_inhibit_reasons;
1381 
1382 	gpa_t wall_clock;
1383 
1384 	bool mwait_in_guest;
1385 	bool hlt_in_guest;
1386 	bool pause_in_guest;
1387 	bool cstate_in_guest;
1388 
1389 	unsigned long irq_sources_bitmap;
1390 	s64 kvmclock_offset;
1391 
1392 	/*
1393 	 * This also protects nr_vcpus_matched_tsc which is read from a
1394 	 * preemption-disabled region, so it must be a raw spinlock.
1395 	 */
1396 	raw_spinlock_t tsc_write_lock;
1397 	u64 last_tsc_nsec;
1398 	u64 last_tsc_write;
1399 	u32 last_tsc_khz;
1400 	u64 last_tsc_offset;
1401 	u64 cur_tsc_nsec;
1402 	u64 cur_tsc_write;
1403 	u64 cur_tsc_offset;
1404 	u64 cur_tsc_generation;
1405 	int nr_vcpus_matched_tsc;
1406 
1407 	u32 default_tsc_khz;
1408 	bool user_set_tsc;
1409 	u64 apic_bus_cycle_ns;
1410 
1411 	seqcount_raw_spinlock_t pvclock_sc;
1412 	bool use_master_clock;
1413 	u64 master_kernel_ns;
1414 	u64 master_cycle_now;
1415 	struct delayed_work kvmclock_update_work;
1416 	struct delayed_work kvmclock_sync_work;
1417 
1418 	/* reads protected by irq_srcu, writes by irq_lock */
1419 	struct hlist_head mask_notifier_list;
1420 
1421 #ifdef CONFIG_KVM_HYPERV
1422 	struct kvm_hv hyperv;
1423 #endif
1424 
1425 #ifdef CONFIG_KVM_XEN
1426 	struct kvm_xen xen;
1427 #endif
1428 
1429 	bool backwards_tsc_observed;
1430 	bool boot_vcpu_runs_old_kvmclock;
1431 	u32 bsp_vcpu_id;
1432 
1433 	u64 disabled_quirks;
1434 
1435 	enum kvm_irqchip_mode irqchip_mode;
1436 	u8 nr_reserved_ioapic_pins;
1437 
1438 	bool disabled_lapic_found;
1439 
1440 	bool x2apic_format;
1441 	bool x2apic_broadcast_quirk_disabled;
1442 
1443 	bool guest_can_read_msr_platform_info;
1444 	bool exception_payload_enabled;
1445 
1446 	bool triple_fault_event;
1447 
1448 	bool bus_lock_detection_enabled;
1449 	bool enable_pmu;
1450 
1451 	u32 notify_window;
1452 	u32 notify_vmexit_flags;
1453 	/*
1454 	 * If exit_on_emulation_error is set, and the in-kernel instruction
1455 	 * emulator fails to emulate an instruction, allow userspace
1456 	 * the opportunity to look at it.
1457 	 */
1458 	bool exit_on_emulation_error;
1459 
1460 	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
1461 	u32 user_space_msr_mask;
1462 	struct kvm_x86_msr_filter __rcu *msr_filter;
1463 
1464 	u32 hypercall_exit_enabled;
1465 
1466 	/* Guest can access the SGX PROVISIONKEY. */
1467 	bool sgx_provisioning_allowed;
1468 
1469 	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
1470 	struct vhost_task *nx_huge_page_recovery_thread;
1471 	u64 nx_huge_page_last;
1472 	struct once nx_once;
1473 
1474 #ifdef CONFIG_X86_64
1475 	/* The number of TDP MMU pages across all roots. */
1476 	atomic64_t tdp_mmu_pages;
1477 
1478 	/*
1479 	 * List of struct kvm_mmu_pages being used as roots.
1480 	 * All struct kvm_mmu_pages in the list should have
1481 	 * tdp_mmu_page set.
1482 	 *
1483 	 * For reads, this list is protected by:
1484 	 *	RCU alone or
1485 	 *	the MMU lock in read mode + RCU or
1486 	 *	the MMU lock in write mode
1487 	 *
1488 	 * For writes, this list is protected by tdp_mmu_pages_lock; see
1489 	 * below for the details.
1490 	 *
1491 	 * Roots will remain in the list until their tdp_mmu_root_count
1492 	 * drops to zero, at which point the thread that decremented the
1493 	 * count to zero should removed the root from the list and clean
1494 	 * it up, freeing the root after an RCU grace period.
1495 	 */
1496 	struct list_head tdp_mmu_roots;
1497 
1498 	/*
1499 	 * Protects accesses to the following fields when the MMU lock
1500 	 * is held in read mode:
1501 	 *  - tdp_mmu_roots (above)
1502 	 *  - the link field of kvm_mmu_page structs used by the TDP MMU
1503 	 *  - possible_nx_huge_pages;
1504 	 *  - the possible_nx_huge_page_link field of kvm_mmu_page structs used
1505 	 *    by the TDP MMU
1506 	 * Because the lock is only taken within the MMU lock, strictly
1507 	 * speaking it is redundant to acquire this lock when the thread
1508 	 * holds the MMU lock in write mode.  However it often simplifies
1509 	 * the code to do so.
1510 	 */
1511 	spinlock_t tdp_mmu_pages_lock;
1512 #endif /* CONFIG_X86_64 */
1513 
1514 	/*
1515 	 * If set, at least one shadow root has been allocated. This flag
1516 	 * is used as one input when determining whether certain memslot
1517 	 * related allocations are necessary.
1518 	 */
1519 	bool shadow_root_allocated;
1520 
1521 #ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1522 	/*
1523 	 * If set, the VM has (or had) an external write tracking user, and
1524 	 * thus all write tracking metadata has been allocated, even if KVM
1525 	 * itself isn't using write tracking.
1526 	 */
1527 	bool external_write_tracking_enabled;
1528 #endif
1529 
1530 #if IS_ENABLED(CONFIG_HYPERV)
1531 	hpa_t	hv_root_tdp;
1532 	spinlock_t hv_root_tdp_lock;
1533 	struct hv_partition_assist_pg *hv_pa_pg;
1534 #endif
1535 	/*
1536 	 * VM-scope maximum vCPU ID. Used to determine the size of structures
1537 	 * that increase along with the maximum vCPU ID, in which case, using
1538 	 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
1539 	 */
1540 	u32 max_vcpu_ids;
1541 
1542 	bool disable_nx_huge_pages;
1543 
1544 	/*
1545 	 * Memory caches used to allocate shadow pages when performing eager
1546 	 * page splitting. No need for a shadowed_info_cache since eager page
1547 	 * splitting only allocates direct shadow pages.
1548 	 *
1549 	 * Protected by kvm->slots_lock.
1550 	 */
1551 	struct kvm_mmu_memory_cache split_shadow_page_cache;
1552 	struct kvm_mmu_memory_cache split_page_header_cache;
1553 
1554 	/*
1555 	 * Memory cache used to allocate pte_list_desc structs while splitting
1556 	 * huge pages. In the worst case, to split one huge page, 512
1557 	 * pte_list_desc structs are needed to add each lower level leaf sptep
1558 	 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
1559 	 * page table.
1560 	 *
1561 	 * Protected by kvm->slots_lock.
1562 	 */
1563 #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
1564 	struct kvm_mmu_memory_cache split_desc_cache;
1565 
1566 	gfn_t gfn_direct_bits;
1567 };
1568 
1569 struct kvm_vm_stat {
1570 	struct kvm_vm_stat_generic generic;
1571 	u64 mmu_shadow_zapped;
1572 	u64 mmu_pte_write;
1573 	u64 mmu_pde_zapped;
1574 	u64 mmu_flooded;
1575 	u64 mmu_recycled;
1576 	u64 mmu_cache_miss;
1577 	u64 mmu_unsync;
1578 	union {
1579 		struct {
1580 			atomic64_t pages_4k;
1581 			atomic64_t pages_2m;
1582 			atomic64_t pages_1g;
1583 		};
1584 		atomic64_t pages[KVM_NR_PAGE_SIZES];
1585 	};
1586 	u64 nx_lpage_splits;
1587 	u64 max_mmu_page_hash_collisions;
1588 	u64 max_mmu_rmap_size;
1589 };
1590 
1591 struct kvm_vcpu_stat {
1592 	struct kvm_vcpu_stat_generic generic;
1593 	u64 pf_taken;
1594 	u64 pf_fixed;
1595 	u64 pf_emulate;
1596 	u64 pf_spurious;
1597 	u64 pf_fast;
1598 	u64 pf_mmio_spte_created;
1599 	u64 pf_guest;
1600 	u64 tlb_flush;
1601 	u64 invlpg;
1602 
1603 	u64 exits;
1604 	u64 io_exits;
1605 	u64 mmio_exits;
1606 	u64 signal_exits;
1607 	u64 irq_window_exits;
1608 	u64 nmi_window_exits;
1609 	u64 l1d_flush;
1610 	u64 halt_exits;
1611 	u64 request_irq_exits;
1612 	u64 irq_exits;
1613 	u64 host_state_reload;
1614 	u64 fpu_reload;
1615 	u64 insn_emulation;
1616 	u64 insn_emulation_fail;
1617 	u64 hypercalls;
1618 	u64 irq_injections;
1619 	u64 nmi_injections;
1620 	u64 req_event;
1621 	u64 nested_run;
1622 	u64 directed_yield_attempted;
1623 	u64 directed_yield_successful;
1624 	u64 preemption_reported;
1625 	u64 preemption_other;
1626 	u64 guest_mode;
1627 	u64 notify_window_exits;
1628 };
1629 
1630 struct x86_instruction_info;
1631 
1632 struct msr_data {
1633 	bool host_initiated;
1634 	u32 index;
1635 	u64 data;
1636 };
1637 
1638 struct kvm_lapic_irq {
1639 	u32 vector;
1640 	u16 delivery_mode;
1641 	u16 dest_mode;
1642 	bool level;
1643 	u16 trig_mode;
1644 	u32 shorthand;
1645 	u32 dest_id;
1646 	bool msi_redir_hint;
1647 };
1648 
kvm_lapic_irq_dest_mode(bool dest_mode_logical)1649 static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
1650 {
1651 	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
1652 }
1653 
1654 struct kvm_x86_ops {
1655 	const char *name;
1656 
1657 	int (*check_processor_compatibility)(void);
1658 
1659 	int (*enable_virtualization_cpu)(void);
1660 	void (*disable_virtualization_cpu)(void);
1661 	cpu_emergency_virt_cb *emergency_disable_virtualization_cpu;
1662 
1663 	void (*hardware_unsetup)(void);
1664 	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
1665 	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
1666 
1667 	unsigned int vm_size;
1668 	int (*vm_init)(struct kvm *kvm);
1669 	void (*vm_destroy)(struct kvm *kvm);
1670 
1671 	/* Create, but do not attach this VCPU */
1672 	int (*vcpu_precreate)(struct kvm *kvm);
1673 	int (*vcpu_create)(struct kvm_vcpu *vcpu);
1674 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
1675 	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
1676 
1677 	void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
1678 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
1679 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
1680 
1681 	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
1682 	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1683 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1684 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
1685 	void (*get_segment)(struct kvm_vcpu *vcpu,
1686 			    struct kvm_segment *var, int seg);
1687 	int (*get_cpl)(struct kvm_vcpu *vcpu);
1688 	int (*get_cpl_no_cache)(struct kvm_vcpu *vcpu);
1689 	void (*set_segment)(struct kvm_vcpu *vcpu,
1690 			    struct kvm_segment *var, int seg);
1691 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
1692 	bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1693 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1694 	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
1695 	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1696 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1697 	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
1698 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1699 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1700 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1701 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1702 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
1703 	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
1704 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
1705 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
1706 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
1707 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
1708 	bool (*get_if_flag)(struct kvm_vcpu *vcpu);
1709 
1710 	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
1711 	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
1712 #if IS_ENABLED(CONFIG_HYPERV)
1713 	int  (*flush_remote_tlbs)(struct kvm *kvm);
1714 	int  (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
1715 					gfn_t nr_pages);
1716 #endif
1717 
1718 	/*
1719 	 * Flush any TLB entries associated with the given GVA.
1720 	 * Does not need to flush GPA->HPA mappings.
1721 	 * Can potentially get non-canonical addresses through INVLPGs, which
1722 	 * the implementation may choose to ignore if appropriate.
1723 	 */
1724 	void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);
1725 
1726 	/*
1727 	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
1728 	 * does not need to flush GPA->HPA mappings.
1729 	 */
1730 	void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
1731 
1732 	int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
1733 	enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
1734 						  bool force_immediate_exit);
1735 	int (*handle_exit)(struct kvm_vcpu *vcpu,
1736 		enum exit_fastpath_completion exit_fastpath);
1737 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
1738 	void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
1739 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
1740 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
1741 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
1742 				unsigned char *hypercall_addr);
1743 	void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
1744 	void (*inject_nmi)(struct kvm_vcpu *vcpu);
1745 	void (*inject_exception)(struct kvm_vcpu *vcpu);
1746 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
1747 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1748 	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1749 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
1750 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
1751 	/* Whether or not a virtual NMI is pending in hardware. */
1752 	bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
1753 	/*
1754 	 * Attempt to pend a virtual NMI in hardware.  Returns %true on success
1755 	 * to allow using static_call_ret0 as the fallback.
1756 	 */
1757 	bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
1758 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
1759 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
1760 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
1761 
1762 	const bool x2apic_icr_is_split;
1763 	const unsigned long required_apicv_inhibits;
1764 	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
1765 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
1766 	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
1767 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
1768 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
1769 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
1770 	void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
1771 				  int trig_mode, int vector);
1772 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
1773 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
1774 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
1775 	u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1776 
1777 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
1778 			     int root_level);
1779 
1780 	/* Update external mapping with page table link. */
1781 	int (*link_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1782 				void *external_spt);
1783 	/* Update the external page table from spte getting set. */
1784 	int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1785 				 kvm_pfn_t pfn_for_gfn);
1786 
1787 	/* Update external page tables for page table about to be freed. */
1788 	int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1789 				 void *external_spt);
1790 
1791 	/* Update external page table from spte getting removed, and flush TLB. */
1792 	int (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1793 				    kvm_pfn_t pfn_for_gfn);
1794 
1795 	bool (*has_wbinvd_exit)(void);
1796 
1797 	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
1798 	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
1799 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
1800 	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
1801 
1802 	/*
1803 	 * Retrieve somewhat arbitrary exit/entry information.  Intended to
1804 	 * be used only from within tracepoints or error paths.
1805 	 */
1806 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
1807 			      u64 *info1, u64 *info2,
1808 			      u32 *intr_info, u32 *error_code);
1809 
1810 	void (*get_entry_info)(struct kvm_vcpu *vcpu,
1811 			       u32 *intr_info, u32 *error_code);
1812 
1813 	int (*check_intercept)(struct kvm_vcpu *vcpu,
1814 			       struct x86_instruction_info *info,
1815 			       enum x86_intercept_stage stage,
1816 			       struct x86_exception *exception);
1817 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
1818 
1819 	/*
1820 	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
1821 	 * value indicates CPU dirty logging is unsupported or disabled.
1822 	 */
1823 	int cpu_dirty_log_size;
1824 	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
1825 
1826 	const struct kvm_x86_nested_ops *nested_ops;
1827 
1828 	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
1829 	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
1830 
1831 	int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
1832 			      uint32_t guest_irq, bool set);
1833 	void (*pi_start_assignment)(struct kvm *kvm);
1834 	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
1835 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
1836 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
1837 
1838 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
1839 			    bool *expired);
1840 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
1841 
1842 	void (*setup_mce)(struct kvm_vcpu *vcpu);
1843 
1844 #ifdef CONFIG_KVM_SMM
1845 	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1846 	int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
1847 	int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
1848 	void (*enable_smi_window)(struct kvm_vcpu *vcpu);
1849 #endif
1850 
1851 	int (*dev_get_attr)(u32 group, u64 attr, u64 *val);
1852 	int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
1853 	int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1854 	int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1855 	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
1856 	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
1857 	void (*guest_memory_reclaimed)(struct kvm *kvm);
1858 
1859 	int (*get_feature_msr)(u32 msr, u64 *data);
1860 
1861 	int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
1862 					 void *insn, int insn_len);
1863 
1864 	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
1865 	int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
1866 
1867 	void (*migrate_timers)(struct kvm_vcpu *vcpu);
1868 	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
1869 	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
1870 
1871 	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
1872 
1873 	/*
1874 	 * Returns vCPU specific APICv inhibit reasons
1875 	 */
1876 	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
1877 
1878 	gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
1879 	void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
1880 	int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
1881 	void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
1882 	int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
1883 };
1884 
1885 struct kvm_x86_nested_ops {
1886 	void (*leave_nested)(struct kvm_vcpu *vcpu);
1887 	bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
1888 				    u32 error_code);
1889 	int (*check_events)(struct kvm_vcpu *vcpu);
1890 	bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
1891 	void (*triple_fault)(struct kvm_vcpu *vcpu);
1892 	int (*get_state)(struct kvm_vcpu *vcpu,
1893 			 struct kvm_nested_state __user *user_kvm_nested_state,
1894 			 unsigned user_data_size);
1895 	int (*set_state)(struct kvm_vcpu *vcpu,
1896 			 struct kvm_nested_state __user *user_kvm_nested_state,
1897 			 struct kvm_nested_state *kvm_state);
1898 	bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
1899 	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
1900 
1901 	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
1902 			    uint16_t *vmcs_version);
1903 	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
1904 	void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu);
1905 };
1906 
1907 struct kvm_x86_init_ops {
1908 	int (*hardware_setup)(void);
1909 	unsigned int (*handle_intel_pt_intr)(void);
1910 
1911 	struct kvm_x86_ops *runtime_ops;
1912 	struct kvm_pmu_ops *pmu_ops;
1913 };
1914 
1915 struct kvm_arch_async_pf {
1916 	u32 token;
1917 	gfn_t gfn;
1918 	unsigned long cr3;
1919 	bool direct_map;
1920 	u64 error_code;
1921 };
1922 
1923 extern u32 __read_mostly kvm_nr_uret_msrs;
1924 extern bool __read_mostly allow_smaller_maxphyaddr;
1925 extern bool __read_mostly enable_apicv;
1926 extern struct kvm_x86_ops kvm_x86_ops;
1927 
1928 #define kvm_x86_call(func) static_call(kvm_x86_##func)
1929 #define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)
1930 
1931 #define KVM_X86_OP(func) \
1932 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
1933 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
1934 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
1935 #include <asm/kvm-x86-ops.h>
1936 
1937 int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
1938 void kvm_x86_vendor_exit(void);
1939 
1940 #define __KVM_HAVE_ARCH_VM_ALLOC
kvm_arch_alloc_vm(void)1941 static inline struct kvm *kvm_arch_alloc_vm(void)
1942 {
1943 	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1944 }
1945 
1946 #define __KVM_HAVE_ARCH_VM_FREE
1947 void kvm_arch_free_vm(struct kvm *kvm);
1948 
1949 #if IS_ENABLED(CONFIG_HYPERV)
1950 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
kvm_arch_flush_remote_tlbs(struct kvm * kvm)1951 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
1952 {
1953 	if (kvm_x86_ops.flush_remote_tlbs &&
1954 	    !kvm_x86_call(flush_remote_tlbs)(kvm))
1955 		return 0;
1956 	else
1957 		return -ENOTSUPP;
1958 }
1959 
1960 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
kvm_arch_flush_remote_tlbs_range(struct kvm * kvm,gfn_t gfn,u64 nr_pages)1961 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
1962 						   u64 nr_pages)
1963 {
1964 	if (!kvm_x86_ops.flush_remote_tlbs_range)
1965 		return -EOPNOTSUPP;
1966 
1967 	return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages);
1968 }
1969 #endif /* CONFIG_HYPERV */
1970 
1971 enum kvm_intr_type {
1972 	/* Values are arbitrary, but must be non-zero. */
1973 	KVM_HANDLING_IRQ = 1,
1974 	KVM_HANDLING_NMI,
1975 };
1976 
1977 /* Enable perf NMI and timer modes to work, and minimise false positives. */
1978 #define kvm_arch_pmi_in_guest(vcpu) \
1979 	((vcpu) && (vcpu)->arch.handling_intr_from_guest && \
1980 	 (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI)))
1981 
1982 void __init kvm_mmu_x86_module_init(void);
1983 int kvm_mmu_vendor_module_init(void);
1984 void kvm_mmu_vendor_module_exit(void);
1985 
1986 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
1987 int kvm_mmu_create(struct kvm_vcpu *vcpu);
1988 void kvm_mmu_init_vm(struct kvm *kvm);
1989 void kvm_mmu_uninit_vm(struct kvm *kvm);
1990 
1991 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
1992 					    struct kvm_memory_slot *slot);
1993 
1994 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
1995 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1996 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
1997 				      const struct kvm_memory_slot *memslot,
1998 				      int start_level);
1999 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
2000 				       const struct kvm_memory_slot *memslot,
2001 				       int target_level);
2002 void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
2003 				  const struct kvm_memory_slot *memslot,
2004 				  u64 start, u64 end,
2005 				  int target_level);
2006 void kvm_mmu_recover_huge_pages(struct kvm *kvm,
2007 				const struct kvm_memory_slot *memslot);
2008 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
2009 				   const struct kvm_memory_slot *memslot);
2010 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
2011 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
2012 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
2013 
2014 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
2015 
2016 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
2017 			  const void *val, int bytes);
2018 
2019 struct kvm_irq_mask_notifier {
2020 	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
2021 	int irq;
2022 	struct hlist_node link;
2023 };
2024 
2025 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
2026 				    struct kvm_irq_mask_notifier *kimn);
2027 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
2028 				      struct kvm_irq_mask_notifier *kimn);
2029 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
2030 			     bool mask);
2031 
2032 extern bool tdp_enabled;
2033 
2034 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
2035 
2036 /*
2037  * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
2038  *			userspace I/O) to indicate that the emulation context
2039  *			should be reused as is, i.e. skip initialization of
2040  *			emulation context, instruction fetch and decode.
2041  *
2042  * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
2043  *		      Indicates that only select instructions (tagged with
2044  *		      EmulateOnUD) should be emulated (to minimize the emulator
2045  *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
2046  *
2047  * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
2048  *		   decode the instruction length.  For use *only* by
2049  *		   kvm_x86_ops.skip_emulated_instruction() implementations if
2050  *		   EMULTYPE_COMPLETE_USER_EXIT is not set.
2051  *
2052  * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
2053  *			     retry native execution under certain conditions,
2054  *			     Can only be set in conjunction with EMULTYPE_PF.
2055  *
2056  * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
2057  *			     triggered by KVM's magic "force emulation" prefix,
2058  *			     which is opt in via module param (off by default).
2059  *			     Bypasses EmulateOnUD restriction despite emulating
2060  *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
2061  *			     Used to test the full emulator from userspace.
2062  *
2063  * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
2064  *			backdoor emulation, which is opt in via module param.
2065  *			VMware backdoor emulation handles select instructions
2066  *			and reinjects the #GP for all other cases.
2067  *
2068  * EMULTYPE_PF - Set when an intercepted #PF triggers the emulation, in which case
2069  *		 the CR2/GPA value pass on the stack is valid.
2070  *
2071  * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
2072  *				 state and inject single-step #DBs after skipping
2073  *				 an instruction (after completing userspace I/O).
2074  *
2075  * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
2076  *			     is attempting to write a gfn that contains one or
2077  *			     more of the PTEs used to translate the write itself,
2078  *			     and the owning page table is being shadowed by KVM.
2079  *			     If emulation of the faulting instruction fails and
2080  *			     this flag is set, KVM will exit to userspace instead
2081  *			     of retrying emulation as KVM cannot make forward
2082  *			     progress.
2083  *
2084  *			     If emulation fails for a write to guest page tables,
2085  *			     KVM unprotects (zaps) the shadow page for the target
2086  *			     gfn and resumes the guest to retry the non-emulatable
2087  *			     instruction (on hardware).  Unprotecting the gfn
2088  *			     doesn't allow forward progress for a self-changing
2089  *			     access because doing so also zaps the translation for
2090  *			     the gfn, i.e. retrying the instruction will hit a
2091  *			     !PRESENT fault, which results in a new shadow page
2092  *			     and sends KVM back to square one.
2093  */
2094 #define EMULTYPE_NO_DECODE	    (1 << 0)
2095 #define EMULTYPE_TRAP_UD	    (1 << 1)
2096 #define EMULTYPE_SKIP		    (1 << 2)
2097 #define EMULTYPE_ALLOW_RETRY_PF	    (1 << 3)
2098 #define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
2099 #define EMULTYPE_VMWARE_GP	    (1 << 5)
2100 #define EMULTYPE_PF		    (1 << 6)
2101 #define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
2102 #define EMULTYPE_WRITE_PF_TO_SP	    (1 << 8)
2103 
kvm_can_emulate_event_vectoring(int emul_type)2104 static inline bool kvm_can_emulate_event_vectoring(int emul_type)
2105 {
2106 	return !(emul_type & EMULTYPE_PF);
2107 }
2108 
2109 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
2110 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
2111 					void *insn, int insn_len);
2112 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
2113 					  u64 *data, u8 ndata);
2114 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
2115 
2116 void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
2117 
2118 void kvm_enable_efer_bits(u64);
2119 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
2120 int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2121 int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
2122 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
2123 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2124 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
2125 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
2126 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
2127 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
2128 int kvm_emulate_invd(struct kvm_vcpu *vcpu);
2129 int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
2130 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
2131 int kvm_emulate_monitor(struct kvm_vcpu *vcpu);
2132 
2133 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
2134 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
2135 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
2136 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
2137 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
2138 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
2139 
2140 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2141 void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2142 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
2143 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
2144 
2145 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
2146 		    int reason, bool has_error_code, u32 error_code);
2147 
2148 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
2149 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
2150 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
2151 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
2152 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
2153 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
2154 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
2155 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr);
2156 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
2157 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
2158 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
2159 
2160 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2161 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2162 
2163 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
2164 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
2165 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
2166 
2167 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
2168 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
2169 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
2170 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
2171 			   bool has_error_code, u32 error_code);
2172 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
2173 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
2174 				    struct x86_exception *fault);
2175 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
2176 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
2177 
__kvm_irq_line_state(unsigned long * irq_state,int irq_source_id,int level)2178 static inline int __kvm_irq_line_state(unsigned long *irq_state,
2179 				       int irq_source_id, int level)
2180 {
2181 	/* Logical OR for level trig interrupt */
2182 	if (level)
2183 		__set_bit(irq_source_id, irq_state);
2184 	else
2185 		__clear_bit(irq_source_id, irq_state);
2186 
2187 	return !!(*irq_state);
2188 }
2189 
2190 int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
2191 void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
2192 
2193 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
2194 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
2195 
2196 void kvm_update_dr7(struct kvm_vcpu *vcpu);
2197 
2198 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
2199 				       bool always_retry);
2200 
kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu * vcpu,gpa_t cr2_or_gpa)2201 static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
2202 						   gpa_t cr2_or_gpa)
2203 {
2204 	return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
2205 }
2206 
2207 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
2208 			ulong roots_to_free);
2209 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
2210 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
2211 			      struct x86_exception *exception);
2212 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
2213 			       struct x86_exception *exception);
2214 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
2215 				struct x86_exception *exception);
2216 
2217 bool kvm_apicv_activated(struct kvm *kvm);
2218 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
2219 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
2220 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
2221 				      enum kvm_apicv_inhibit reason, bool set);
2222 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
2223 				    enum kvm_apicv_inhibit reason, bool set);
2224 
kvm_set_apicv_inhibit(struct kvm * kvm,enum kvm_apicv_inhibit reason)2225 static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
2226 					 enum kvm_apicv_inhibit reason)
2227 {
2228 	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
2229 }
2230 
kvm_clear_apicv_inhibit(struct kvm * kvm,enum kvm_apicv_inhibit reason)2231 static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
2232 					   enum kvm_apicv_inhibit reason)
2233 {
2234 	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
2235 }
2236 
2237 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
2238 		       void *insn, int insn_len);
2239 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
2240 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
2241 void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
2242 			     u64 addr, unsigned long roots);
2243 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
2244 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
2245 
2246 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
2247 		       int tdp_max_root_level, int tdp_huge_page_level);
2248 
2249 
2250 #ifdef CONFIG_KVM_PRIVATE_MEM
2251 #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
2252 #else
2253 #define kvm_arch_has_private_mem(kvm) false
2254 #endif
2255 
2256 #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
2257 
kvm_read_ldt(void)2258 static inline u16 kvm_read_ldt(void)
2259 {
2260 	u16 ldt;
2261 	asm("sldt %0" : "=g"(ldt));
2262 	return ldt;
2263 }
2264 
kvm_load_ldt(u16 sel)2265 static inline void kvm_load_ldt(u16 sel)
2266 {
2267 	asm("lldt %0" : : "rm"(sel));
2268 }
2269 
2270 #ifdef CONFIG_X86_64
read_msr(unsigned long msr)2271 static inline unsigned long read_msr(unsigned long msr)
2272 {
2273 	u64 value;
2274 
2275 	rdmsrl(msr, value);
2276 	return value;
2277 }
2278 #endif
2279 
kvm_inject_gp(struct kvm_vcpu * vcpu,u32 error_code)2280 static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
2281 {
2282 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2283 }
2284 
2285 #define TSS_IOPB_BASE_OFFSET 0x66
2286 #define TSS_BASE_SIZE 0x68
2287 #define TSS_IOPB_SIZE (65536 / 8)
2288 #define TSS_REDIRECTION_SIZE (256 / 8)
2289 #define RMODE_TSS_SIZE							\
2290 	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
2291 
2292 enum {
2293 	TASK_SWITCH_CALL = 0,
2294 	TASK_SWITCH_IRET = 1,
2295 	TASK_SWITCH_JMP = 2,
2296 	TASK_SWITCH_GATE = 3,
2297 };
2298 
2299 #define HF_GUEST_MASK		(1 << 0) /* VCPU is in guest-mode */
2300 
2301 #ifdef CONFIG_KVM_SMM
2302 #define HF_SMM_MASK		(1 << 1)
2303 #define HF_SMM_INSIDE_NMI_MASK	(1 << 2)
2304 
2305 # define KVM_MAX_NR_ADDRESS_SPACES	2
2306 /* SMM is currently unsupported for guests with private memory. */
2307 # define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
2308 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
2309 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
2310 #else
2311 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
2312 #endif
2313 
2314 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
2315 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
2316 int kvm_cpu_has_extint(struct kvm_vcpu *v);
2317 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
2318 int kvm_cpu_get_extint(struct kvm_vcpu *v);
2319 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
2320 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
2321 
2322 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
2323 		    unsigned long ipi_bitmap_high, u32 min,
2324 		    unsigned long icr, int op_64_bit);
2325 
2326 int kvm_add_user_return_msr(u32 msr);
2327 int kvm_find_user_return_msr(u32 msr);
2328 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
2329 
kvm_is_supported_user_return_msr(u32 msr)2330 static inline bool kvm_is_supported_user_return_msr(u32 msr)
2331 {
2332 	return kvm_find_user_return_msr(msr) >= 0;
2333 }
2334 
2335 u64 kvm_scale_tsc(u64 tsc, u64 ratio);
2336 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
2337 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
2338 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
2339 
2340 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
2341 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
2342 
2343 void kvm_make_scan_ioapic_request(struct kvm *kvm);
2344 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
2345 				       unsigned long *vcpu_bitmap);
2346 
2347 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2348 				     struct kvm_async_pf *work);
2349 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2350 				 struct kvm_async_pf *work);
2351 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2352 			       struct kvm_async_pf *work);
2353 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
2354 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
2355 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
2356 
2357 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
2358 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
2359 
2360 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
2361 				     u32 size);
2362 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
2363 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
2364 
2365 bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
2366 			     struct kvm_vcpu **dest_vcpu);
2367 
2368 void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
2369 		     struct kvm_lapic_irq *irq);
2370 
kvm_irq_is_postable(struct kvm_lapic_irq * irq)2371 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
2372 {
2373 	/* We can only post Fixed and LowPrio IRQs */
2374 	return (irq->delivery_mode == APIC_DM_FIXED ||
2375 		irq->delivery_mode == APIC_DM_LOWEST);
2376 }
2377 
kvm_arch_vcpu_blocking(struct kvm_vcpu * vcpu)2378 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
2379 {
2380 	kvm_x86_call(vcpu_blocking)(vcpu);
2381 }
2382 
kvm_arch_vcpu_unblocking(struct kvm_vcpu * vcpu)2383 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
2384 {
2385 	kvm_x86_call(vcpu_unblocking)(vcpu);
2386 }
2387 
kvm_cpu_get_apicid(int mps_cpu)2388 static inline int kvm_cpu_get_apicid(int mps_cpu)
2389 {
2390 #ifdef CONFIG_X86_LOCAL_APIC
2391 	return default_cpu_present_to_apicid(mps_cpu);
2392 #else
2393 	WARN_ON_ONCE(1);
2394 	return BAD_APICID;
2395 #endif
2396 }
2397 
2398 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
2399 
2400 #define KVM_CLOCK_VALID_FLAGS						\
2401 	(KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)
2402 
2403 #define KVM_X86_VALID_QUIRKS			\
2404 	(KVM_X86_QUIRK_LINT0_REENABLED |	\
2405 	 KVM_X86_QUIRK_CD_NW_CLEARED |		\
2406 	 KVM_X86_QUIRK_LAPIC_MMIO_HOLE |	\
2407 	 KVM_X86_QUIRK_OUT_7E_INC_RIP |		\
2408 	 KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT |	\
2409 	 KVM_X86_QUIRK_FIX_HYPERCALL_INSN |	\
2410 	 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS |	\
2411 	 KVM_X86_QUIRK_SLOT_ZAP_ALL |		\
2412 	 KVM_X86_QUIRK_STUFF_FEATURE_MSRS)
2413 
2414 /*
2415  * KVM previously used a u32 field in kvm_run to indicate the hypercall was
2416  * initiated from long mode. KVM now sets bit 0 to indicate long mode, but the
2417  * remaining 31 lower bits must be 0 to preserve ABI.
2418  */
2419 #define KVM_EXIT_HYPERCALL_MBZ		GENMASK_ULL(31, 1)
2420 
2421 #endif /* _ASM_X86_KVM_HOST_H */
2422