xref: /linux/arch/x86/include/asm/kvm_host.h (revision c5951e7c8ee5cb04b8b41c32bf567b90117a2124)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This header defines architecture specific interfaces, x86 version
6  */
7 
8 #ifndef _ASM_X86_KVM_HOST_H
9 #define _ASM_X86_KVM_HOST_H
10 
11 #include <linux/types.h>
12 #include <linux/mm.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/tracepoint.h>
15 #include <linux/cpumask.h>
16 #include <linux/irq_work.h>
17 #include <linux/irq.h>
18 
19 #include <linux/kvm.h>
20 #include <linux/kvm_para.h>
21 #include <linux/kvm_types.h>
22 #include <linux/perf_event.h>
23 #include <linux/pvclock_gtod.h>
24 #include <linux/clocksource.h>
25 #include <linux/irqbypass.h>
26 #include <linux/hyperv.h>
27 
28 #include <asm/apic.h>
29 #include <asm/pvclock-abi.h>
30 #include <asm/desc.h>
31 #include <asm/mtrr.h>
32 #include <asm/msr-index.h>
33 #include <asm/asm.h>
34 #include <asm/kvm_page_track.h>
35 #include <asm/kvm_vcpu_regs.h>
36 #include <asm/hyperv-tlfs.h>
37 
38 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
39 
40 #define KVM_MAX_VCPUS 288
41 #define KVM_SOFT_MAX_VCPUS 240
42 #define KVM_MAX_VCPU_ID 1023
43 #define KVM_USER_MEM_SLOTS 509
44 /* memory slots that are not exposed to userspace */
45 #define KVM_PRIVATE_MEM_SLOTS 3
46 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
47 
48 #define KVM_HALT_POLL_NS_DEFAULT 200000
49 
50 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
51 
52 /* x86-specific vcpu->requests bit members */
53 #define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
54 #define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
55 #define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
56 #define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
57 #define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
58 #define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
59 #define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
60 #define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
61 #define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
62 #define KVM_REQ_NMI			KVM_ARCH_REQ(9)
63 #define KVM_REQ_PMU			KVM_ARCH_REQ(10)
64 #define KVM_REQ_PMI			KVM_ARCH_REQ(11)
65 #define KVM_REQ_SMI			KVM_ARCH_REQ(12)
66 #define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
67 #define KVM_REQ_MCLOCK_INPROGRESS \
68 	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
69 #define KVM_REQ_SCAN_IOAPIC \
70 	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
71 #define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
72 #define KVM_REQ_APIC_PAGE_RELOAD \
73 	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
74 #define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
75 #define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
76 #define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
77 #define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
78 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
79 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
80 #define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
81 
82 #define CR0_RESERVED_BITS                                               \
83 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
84 			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
85 			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
86 
87 #define CR4_RESERVED_BITS                                               \
88 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
89 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
90 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
91 			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
92 			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
93 			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))
94 
95 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
96 
97 
98 
99 #define INVALID_PAGE (~(hpa_t)0)
100 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
101 
102 #define UNMAPPED_GVA (~(gpa_t)0)
103 
104 /* KVM Hugepage definitions for x86 */
105 enum {
106 	PT_PAGE_TABLE_LEVEL   = 1,
107 	PT_DIRECTORY_LEVEL    = 2,
108 	PT_PDPE_LEVEL         = 3,
109 	/* set max level to the biggest one */
110 	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
111 };
112 #define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
113 				 PT_PAGE_TABLE_LEVEL + 1)
114 #define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
115 #define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
116 #define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
117 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
118 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
119 
120 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
121 {
122 	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
123 	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
124 		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
125 }
126 
127 #define KVM_PERMILLE_MMU_PAGES 20
128 #define KVM_MIN_ALLOC_MMU_PAGES 64UL
129 #define KVM_MMU_HASH_SHIFT 12
130 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
131 #define KVM_MIN_FREE_MMU_PAGES 5
132 #define KVM_REFILL_PAGES 25
133 #define KVM_MAX_CPUID_ENTRIES 80
134 #define KVM_NR_FIXED_MTRR_REGION 88
135 #define KVM_NR_VAR_MTRR 8
136 
137 #define ASYNC_PF_PER_VCPU 64
138 
139 enum kvm_reg {
140 	VCPU_REGS_RAX = __VCPU_REGS_RAX,
141 	VCPU_REGS_RCX = __VCPU_REGS_RCX,
142 	VCPU_REGS_RDX = __VCPU_REGS_RDX,
143 	VCPU_REGS_RBX = __VCPU_REGS_RBX,
144 	VCPU_REGS_RSP = __VCPU_REGS_RSP,
145 	VCPU_REGS_RBP = __VCPU_REGS_RBP,
146 	VCPU_REGS_RSI = __VCPU_REGS_RSI,
147 	VCPU_REGS_RDI = __VCPU_REGS_RDI,
148 #ifdef CONFIG_X86_64
149 	VCPU_REGS_R8  = __VCPU_REGS_R8,
150 	VCPU_REGS_R9  = __VCPU_REGS_R9,
151 	VCPU_REGS_R10 = __VCPU_REGS_R10,
152 	VCPU_REGS_R11 = __VCPU_REGS_R11,
153 	VCPU_REGS_R12 = __VCPU_REGS_R12,
154 	VCPU_REGS_R13 = __VCPU_REGS_R13,
155 	VCPU_REGS_R14 = __VCPU_REGS_R14,
156 	VCPU_REGS_R15 = __VCPU_REGS_R15,
157 #endif
158 	VCPU_REGS_RIP,
159 	NR_VCPU_REGS,
160 
161 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
162 	VCPU_EXREG_CR3,
163 	VCPU_EXREG_RFLAGS,
164 	VCPU_EXREG_SEGMENTS,
165 };
166 
167 enum {
168 	VCPU_SREG_ES,
169 	VCPU_SREG_CS,
170 	VCPU_SREG_SS,
171 	VCPU_SREG_DS,
172 	VCPU_SREG_FS,
173 	VCPU_SREG_GS,
174 	VCPU_SREG_TR,
175 	VCPU_SREG_LDTR,
176 };
177 
178 enum exit_fastpath_completion {
179 	EXIT_FASTPATH_NONE,
180 	EXIT_FASTPATH_SKIP_EMUL_INS,
181 };
182 
183 #include <asm/kvm_emulate.h>
184 
185 #define KVM_NR_MEM_OBJS 40
186 
187 #define KVM_NR_DB_REGS	4
188 
189 #define DR6_BD		(1 << 13)
190 #define DR6_BS		(1 << 14)
191 #define DR6_BT		(1 << 15)
192 #define DR6_RTM		(1 << 16)
193 #define DR6_FIXED_1	0xfffe0ff0
194 #define DR6_INIT	0xffff0ff0
195 #define DR6_VOLATILE	0x0001e00f
196 
197 #define DR7_BP_EN_MASK	0x000000ff
198 #define DR7_GE		(1 << 9)
199 #define DR7_GD		(1 << 13)
200 #define DR7_FIXED_1	0x00000400
201 #define DR7_VOLATILE	0xffff2bff
202 
203 #define PFERR_PRESENT_BIT 0
204 #define PFERR_WRITE_BIT 1
205 #define PFERR_USER_BIT 2
206 #define PFERR_RSVD_BIT 3
207 #define PFERR_FETCH_BIT 4
208 #define PFERR_PK_BIT 5
209 #define PFERR_GUEST_FINAL_BIT 32
210 #define PFERR_GUEST_PAGE_BIT 33
211 
212 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
213 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
214 #define PFERR_USER_MASK (1U << PFERR_USER_BIT)
215 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
216 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
217 #define PFERR_PK_MASK (1U << PFERR_PK_BIT)
218 #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
219 #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
220 
221 #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
222 				 PFERR_WRITE_MASK |		\
223 				 PFERR_PRESENT_MASK)
224 
225 /* apic attention bits */
226 #define KVM_APIC_CHECK_VAPIC	0
227 /*
228  * The following bit is set with PV-EOI, unset on EOI.
229  * We detect PV-EOI changes by guest by comparing
230  * this bit with PV-EOI in guest memory.
231  * See the implementation in apic_update_pv_eoi.
232  */
233 #define KVM_APIC_PV_EOI_PENDING	1
234 
235 struct kvm_kernel_irq_routing_entry;
236 
237 /*
238  * We don't want allocation failures within the mmu code, so we preallocate
239  * enough memory for a single page fault in a cache.
240  */
241 struct kvm_mmu_memory_cache {
242 	int nobjs;
243 	void *objects[KVM_NR_MEM_OBJS];
244 };
245 
246 /*
247  * the pages used as guest page table on soft mmu are tracked by
248  * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
249  * by indirect shadow page can not be more than 15 bits.
250  *
251  * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
252  * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
253  */
254 union kvm_mmu_page_role {
255 	u32 word;
256 	struct {
257 		unsigned level:4;
258 		unsigned gpte_is_8_bytes:1;
259 		unsigned quadrant:2;
260 		unsigned direct:1;
261 		unsigned access:3;
262 		unsigned invalid:1;
263 		unsigned nxe:1;
264 		unsigned cr0_wp:1;
265 		unsigned smep_andnot_wp:1;
266 		unsigned smap_andnot_wp:1;
267 		unsigned ad_disabled:1;
268 		unsigned guest_mode:1;
269 		unsigned :6;
270 
271 		/*
272 		 * This is left at the top of the word so that
273 		 * kvm_memslots_for_spte_role can extract it with a
274 		 * simple shift.  While there is room, give it a whole
275 		 * byte so it is also faster to load it from memory.
276 		 */
277 		unsigned smm:8;
278 	};
279 };
280 
281 union kvm_mmu_extended_role {
282 /*
283  * This structure complements kvm_mmu_page_role caching everything needed for
284  * MMU configuration. If nothing in both these structures changed, MMU
285  * re-configuration can be skipped. @valid bit is set on first usage so we don't
286  * treat all-zero structure as valid data.
287  */
288 	u32 word;
289 	struct {
290 		unsigned int valid:1;
291 		unsigned int execonly:1;
292 		unsigned int cr0_pg:1;
293 		unsigned int cr4_pae:1;
294 		unsigned int cr4_pse:1;
295 		unsigned int cr4_pke:1;
296 		unsigned int cr4_smap:1;
297 		unsigned int cr4_smep:1;
298 		unsigned int cr4_la57:1;
299 		unsigned int maxphyaddr:6;
300 	};
301 };
302 
303 union kvm_mmu_role {
304 	u64 as_u64;
305 	struct {
306 		union kvm_mmu_page_role base;
307 		union kvm_mmu_extended_role ext;
308 	};
309 };
310 
311 struct kvm_rmap_head {
312 	unsigned long val;
313 };
314 
315 struct kvm_mmu_page {
316 	struct list_head link;
317 	struct hlist_node hash_link;
318 	struct list_head lpage_disallowed_link;
319 
320 	bool unsync;
321 	u8 mmu_valid_gen;
322 	bool mmio_cached;
323 	bool lpage_disallowed; /* Can't be replaced by an equiv large page */
324 
325 	/*
326 	 * The following two entries are used to key the shadow page in the
327 	 * hash table.
328 	 */
329 	union kvm_mmu_page_role role;
330 	gfn_t gfn;
331 
332 	u64 *spt;
333 	/* hold the gfn of each spte inside spt */
334 	gfn_t *gfns;
335 	int root_count;          /* Currently serving as active root */
336 	unsigned int unsync_children;
337 	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
338 	DECLARE_BITMAP(unsync_child_bitmap, 512);
339 
340 #ifdef CONFIG_X86_32
341 	/*
342 	 * Used out of the mmu-lock to avoid reading spte values while an
343 	 * update is in progress; see the comments in __get_spte_lockless().
344 	 */
345 	int clear_spte_count;
346 #endif
347 
348 	/* Number of writes since the last time traversal visited this page.  */
349 	atomic_t write_flooding_count;
350 };
351 
352 struct kvm_pio_request {
353 	unsigned long linear_rip;
354 	unsigned long count;
355 	int in;
356 	int port;
357 	int size;
358 };
359 
360 #define PT64_ROOT_MAX_LEVEL 5
361 
362 struct rsvd_bits_validate {
363 	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
364 	u64 bad_mt_xwr;
365 };
366 
367 struct kvm_mmu_root_info {
368 	gpa_t cr3;
369 	hpa_t hpa;
370 };
371 
372 #define KVM_MMU_ROOT_INFO_INVALID \
373 	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })
374 
375 #define KVM_MMU_NUM_PREV_ROOTS 3
376 
377 /*
378  * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
379  * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
380  * current mmu mode.
381  */
382 struct kvm_mmu {
383 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
384 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
385 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
386 	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
387 			  bool prefault);
388 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
389 				  struct x86_exception *fault);
390 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
391 			    u32 access, struct x86_exception *exception);
392 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
393 			       struct x86_exception *exception);
394 	int (*sync_page)(struct kvm_vcpu *vcpu,
395 			 struct kvm_mmu_page *sp);
396 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
397 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
398 			   u64 *spte, const void *pte);
399 	hpa_t root_hpa;
400 	gpa_t root_cr3;
401 	union kvm_mmu_role mmu_role;
402 	u8 root_level;
403 	u8 shadow_root_level;
404 	u8 ept_ad;
405 	bool direct_map;
406 	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
407 
408 	/*
409 	 * Bitmap; bit set = permission fault
410 	 * Byte index: page fault error code [4:1]
411 	 * Bit index: pte permissions in ACC_* format
412 	 */
413 	u8 permissions[16];
414 
415 	/*
416 	* The pkru_mask indicates if protection key checks are needed.  It
417 	* consists of 16 domains indexed by page fault error code bits [4:1],
418 	* with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
419 	* Each domain has 2 bits which are ANDed with AD and WD from PKRU.
420 	*/
421 	u32 pkru_mask;
422 
423 	u64 *pae_root;
424 	u64 *lm_root;
425 
426 	/*
427 	 * check zero bits on shadow page table entries, these
428 	 * bits include not only hardware reserved bits but also
429 	 * the bits spte never used.
430 	 */
431 	struct rsvd_bits_validate shadow_zero_check;
432 
433 	struct rsvd_bits_validate guest_rsvd_check;
434 
435 	/* Can have large pages at levels 2..last_nonleaf_level-1. */
436 	u8 last_nonleaf_level;
437 
438 	bool nx;
439 
440 	u64 pdptrs[4]; /* pae */
441 };
442 
443 struct kvm_tlb_range {
444 	u64 start_gfn;
445 	u64 pages;
446 };
447 
448 enum pmc_type {
449 	KVM_PMC_GP = 0,
450 	KVM_PMC_FIXED,
451 };
452 
453 struct kvm_pmc {
454 	enum pmc_type type;
455 	u8 idx;
456 	u64 counter;
457 	u64 eventsel;
458 	struct perf_event *perf_event;
459 	struct kvm_vcpu *vcpu;
460 	/*
461 	 * eventsel value for general purpose counters,
462 	 * ctrl value for fixed counters.
463 	 */
464 	u64 current_config;
465 };
466 
467 struct kvm_pmu {
468 	unsigned nr_arch_gp_counters;
469 	unsigned nr_arch_fixed_counters;
470 	unsigned available_event_types;
471 	u64 fixed_ctr_ctrl;
472 	u64 global_ctrl;
473 	u64 global_status;
474 	u64 global_ovf_ctrl;
475 	u64 counter_bitmask[2];
476 	u64 global_ctrl_mask;
477 	u64 global_ovf_ctrl_mask;
478 	u64 reserved_bits;
479 	u8 version;
480 	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
481 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
482 	struct irq_work irq_work;
483 	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
484 	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
485 	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
486 
487 	/*
488 	 * The gate to release perf_events not marked in
489 	 * pmc_in_use only once in a vcpu time slice.
490 	 */
491 	bool need_cleanup;
492 
493 	/*
494 	 * The total number of programmed perf_events and it helps to avoid
495 	 * redundant check before cleanup if guest don't use vPMU at all.
496 	 */
497 	u8 event_count;
498 };
499 
500 struct kvm_pmu_ops;
501 
502 enum {
503 	KVM_DEBUGREG_BP_ENABLED = 1,
504 	KVM_DEBUGREG_WONT_EXIT = 2,
505 	KVM_DEBUGREG_RELOAD = 4,
506 };
507 
508 struct kvm_mtrr_range {
509 	u64 base;
510 	u64 mask;
511 	struct list_head node;
512 };
513 
514 struct kvm_mtrr {
515 	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
516 	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
517 	u64 deftype;
518 
519 	struct list_head head;
520 };
521 
522 /* Hyper-V SynIC timer */
523 struct kvm_vcpu_hv_stimer {
524 	struct hrtimer timer;
525 	int index;
526 	union hv_stimer_config config;
527 	u64 count;
528 	u64 exp_time;
529 	struct hv_message msg;
530 	bool msg_pending;
531 };
532 
533 /* Hyper-V synthetic interrupt controller (SynIC)*/
534 struct kvm_vcpu_hv_synic {
535 	u64 version;
536 	u64 control;
537 	u64 msg_page;
538 	u64 evt_page;
539 	atomic64_t sint[HV_SYNIC_SINT_COUNT];
540 	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
541 	DECLARE_BITMAP(auto_eoi_bitmap, 256);
542 	DECLARE_BITMAP(vec_bitmap, 256);
543 	bool active;
544 	bool dont_zero_synic_pages;
545 };
546 
547 /* Hyper-V per vcpu emulation context */
548 struct kvm_vcpu_hv {
549 	u32 vp_index;
550 	u64 hv_vapic;
551 	s64 runtime_offset;
552 	struct kvm_vcpu_hv_synic synic;
553 	struct kvm_hyperv_exit exit;
554 	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
555 	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
556 	cpumask_t tlb_flush;
557 };
558 
559 struct kvm_vcpu_arch {
560 	/*
561 	 * rip and regs accesses must go through
562 	 * kvm_{register,rip}_{read,write} functions.
563 	 */
564 	unsigned long regs[NR_VCPU_REGS];
565 	u32 regs_avail;
566 	u32 regs_dirty;
567 
568 	unsigned long cr0;
569 	unsigned long cr0_guest_owned_bits;
570 	unsigned long cr2;
571 	unsigned long cr3;
572 	unsigned long cr4;
573 	unsigned long cr4_guest_owned_bits;
574 	unsigned long cr8;
575 	u32 pkru;
576 	u32 hflags;
577 	u64 efer;
578 	u64 apic_base;
579 	struct kvm_lapic *apic;    /* kernel irqchip context */
580 	bool apicv_active;
581 	bool load_eoi_exitmap_pending;
582 	DECLARE_BITMAP(ioapic_handled_vectors, 256);
583 	unsigned long apic_attention;
584 	int32_t apic_arb_prio;
585 	int mp_state;
586 	u64 ia32_misc_enable_msr;
587 	u64 smbase;
588 	u64 smi_count;
589 	bool tpr_access_reporting;
590 	bool xsaves_enabled;
591 	u64 ia32_xss;
592 	u64 microcode_version;
593 	u64 arch_capabilities;
594 
595 	/*
596 	 * Paging state of the vcpu
597 	 *
598 	 * If the vcpu runs in guest mode with two level paging this still saves
599 	 * the paging mode of the l1 guest. This context is always used to
600 	 * handle faults.
601 	 */
602 	struct kvm_mmu *mmu;
603 
604 	/* Non-nested MMU for L1 */
605 	struct kvm_mmu root_mmu;
606 
607 	/* L1 MMU when running nested */
608 	struct kvm_mmu guest_mmu;
609 
610 	/*
611 	 * Paging state of an L2 guest (used for nested npt)
612 	 *
613 	 * This context will save all necessary information to walk page tables
614 	 * of an L2 guest. This context is only initialized for page table
615 	 * walking and not for faulting since we never handle l2 page faults on
616 	 * the host.
617 	 */
618 	struct kvm_mmu nested_mmu;
619 
620 	/*
621 	 * Pointer to the mmu context currently used for
622 	 * gva_to_gpa translations.
623 	 */
624 	struct kvm_mmu *walk_mmu;
625 
626 	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
627 	struct kvm_mmu_memory_cache mmu_page_cache;
628 	struct kvm_mmu_memory_cache mmu_page_header_cache;
629 
630 	/*
631 	 * QEMU userspace and the guest each have their own FPU state.
632 	 * In vcpu_run, we switch between the user and guest FPU contexts.
633 	 * While running a VCPU, the VCPU thread will have the guest FPU
634 	 * context.
635 	 *
636 	 * Note that while the PKRU state lives inside the fpu registers,
637 	 * it is switched out separately at VMENTER and VMEXIT time. The
638 	 * "guest_fpu" state here contains the guest FPU context, with the
639 	 * host PRKU bits.
640 	 */
641 	struct fpu *user_fpu;
642 	struct fpu *guest_fpu;
643 
644 	u64 xcr0;
645 	u64 guest_supported_xcr0;
646 	u32 guest_xstate_size;
647 
648 	struct kvm_pio_request pio;
649 	void *pio_data;
650 
651 	u8 event_exit_inst_len;
652 
653 	struct kvm_queued_exception {
654 		bool pending;
655 		bool injected;
656 		bool has_error_code;
657 		u8 nr;
658 		u32 error_code;
659 		unsigned long payload;
660 		bool has_payload;
661 		u8 nested_apf;
662 	} exception;
663 
664 	struct kvm_queued_interrupt {
665 		bool injected;
666 		bool soft;
667 		u8 nr;
668 	} interrupt;
669 
670 	int halt_request; /* real mode on Intel only */
671 
672 	int cpuid_nent;
673 	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
674 
675 	int maxphyaddr;
676 
677 	/* emulate context */
678 
679 	struct x86_emulate_ctxt emulate_ctxt;
680 	bool emulate_regs_need_sync_to_vcpu;
681 	bool emulate_regs_need_sync_from_vcpu;
682 	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
683 
684 	gpa_t time;
685 	struct pvclock_vcpu_time_info hv_clock;
686 	unsigned int hw_tsc_khz;
687 	struct gfn_to_hva_cache pv_time;
688 	bool pv_time_enabled;
689 	/* set guest stopped flag in pvclock flags field */
690 	bool pvclock_set_guest_stopped_request;
691 
692 	struct {
693 		u8 preempted;
694 		u64 msr_val;
695 		u64 last_steal;
696 		struct gfn_to_pfn_cache cache;
697 	} st;
698 
699 	u64 tsc_offset;
700 	u64 last_guest_tsc;
701 	u64 last_host_tsc;
702 	u64 tsc_offset_adjustment;
703 	u64 this_tsc_nsec;
704 	u64 this_tsc_write;
705 	u64 this_tsc_generation;
706 	bool tsc_catchup;
707 	bool tsc_always_catchup;
708 	s8 virtual_tsc_shift;
709 	u32 virtual_tsc_mult;
710 	u32 virtual_tsc_khz;
711 	s64 ia32_tsc_adjust_msr;
712 	u64 msr_ia32_power_ctl;
713 	u64 tsc_scaling_ratio;
714 
715 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
716 	unsigned nmi_pending; /* NMI queued after currently running handler */
717 	bool nmi_injected;    /* Trying to inject an NMI this entry */
718 	bool smi_pending;    /* SMI queued after currently running handler */
719 
720 	struct kvm_mtrr mtrr_state;
721 	u64 pat;
722 
723 	unsigned switch_db_regs;
724 	unsigned long db[KVM_NR_DB_REGS];
725 	unsigned long dr6;
726 	unsigned long dr7;
727 	unsigned long eff_db[KVM_NR_DB_REGS];
728 	unsigned long guest_debug_dr7;
729 	u64 msr_platform_info;
730 	u64 msr_misc_features_enables;
731 
732 	u64 mcg_cap;
733 	u64 mcg_status;
734 	u64 mcg_ctl;
735 	u64 mcg_ext_ctl;
736 	u64 *mce_banks;
737 
738 	/* Cache MMIO info */
739 	u64 mmio_gva;
740 	unsigned mmio_access;
741 	gfn_t mmio_gfn;
742 	u64 mmio_gen;
743 
744 	struct kvm_pmu pmu;
745 
746 	/* used for guest single stepping over the given code position */
747 	unsigned long singlestep_rip;
748 
749 	struct kvm_vcpu_hv hyperv;
750 
751 	cpumask_var_t wbinvd_dirty_mask;
752 
753 	unsigned long last_retry_eip;
754 	unsigned long last_retry_addr;
755 
756 	struct {
757 		bool halted;
758 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
759 		struct gfn_to_hva_cache data;
760 		u64 msr_val;
761 		u32 id;
762 		bool send_user_only;
763 		u32 host_apf_reason;
764 		unsigned long nested_apf_token;
765 		bool delivery_as_pf_vmexit;
766 	} apf;
767 
768 	/* OSVW MSRs (AMD only) */
769 	struct {
770 		u64 length;
771 		u64 status;
772 	} osvw;
773 
774 	struct {
775 		u64 msr_val;
776 		struct gfn_to_hva_cache data;
777 	} pv_eoi;
778 
779 	u64 msr_kvm_poll_control;
780 
781 	/*
782 	 * Indicate whether the access faults on its page table in guest
783 	 * which is set when fix page fault and used to detect unhandeable
784 	 * instruction.
785 	 */
786 	bool write_fault_to_shadow_pgtable;
787 
788 	/* set at EPT violation at this point */
789 	unsigned long exit_qualification;
790 
791 	/* pv related host specific info */
792 	struct {
793 		bool pv_unhalted;
794 	} pv;
795 
796 	int pending_ioapic_eoi;
797 	int pending_external_vector;
798 
799 	/* GPA available */
800 	bool gpa_available;
801 	gpa_t gpa_val;
802 
803 	/* be preempted when it's in kernel-mode(cpl=0) */
804 	bool preempted_in_kernel;
805 
806 	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
807 	bool l1tf_flush_l1d;
808 
809 	/* AMD MSRC001_0015 Hardware Configuration */
810 	u64 msr_hwcr;
811 };
812 
813 struct kvm_lpage_info {
814 	int disallow_lpage;
815 };
816 
817 struct kvm_arch_memory_slot {
818 	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
819 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
820 	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
821 };
822 
823 /*
824  * We use as the mode the number of bits allocated in the LDR for the
825  * logical processor ID.  It happens that these are all powers of two.
826  * This makes it is very easy to detect cases where the APICs are
827  * configured for multiple modes; in that case, we cannot use the map and
828  * hence cannot use kvm_irq_delivery_to_apic_fast either.
829  */
830 #define KVM_APIC_MODE_XAPIC_CLUSTER          4
831 #define KVM_APIC_MODE_XAPIC_FLAT             8
832 #define KVM_APIC_MODE_X2APIC                16
833 
834 struct kvm_apic_map {
835 	struct rcu_head rcu;
836 	u8 mode;
837 	u32 max_apic_id;
838 	union {
839 		struct kvm_lapic *xapic_flat_map[8];
840 		struct kvm_lapic *xapic_cluster_map[16][4];
841 	};
842 	struct kvm_lapic *phys_map[];
843 };
844 
845 /* Hyper-V emulation context */
846 struct kvm_hv {
847 	struct mutex hv_lock;
848 	u64 hv_guest_os_id;
849 	u64 hv_hypercall;
850 	u64 hv_tsc_page;
851 
852 	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
853 	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
854 	u64 hv_crash_ctl;
855 
856 	HV_REFERENCE_TSC_PAGE tsc_ref;
857 
858 	struct idr conn_to_evt;
859 
860 	u64 hv_reenlightenment_control;
861 	u64 hv_tsc_emulation_control;
862 	u64 hv_tsc_emulation_status;
863 
864 	/* How many vCPUs have VP index != vCPU index */
865 	atomic_t num_mismatched_vp_indexes;
866 
867 	struct hv_partition_assist_pg *hv_pa_pg;
868 };
869 
870 enum kvm_irqchip_mode {
871 	KVM_IRQCHIP_NONE,
872 	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
873 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
874 };
875 
876 struct kvm_arch {
877 	unsigned long n_used_mmu_pages;
878 	unsigned long n_requested_mmu_pages;
879 	unsigned long n_max_mmu_pages;
880 	unsigned int indirect_shadow_pages;
881 	u8 mmu_valid_gen;
882 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
883 	/*
884 	 * Hash table of struct kvm_mmu_page.
885 	 */
886 	struct list_head active_mmu_pages;
887 	struct list_head zapped_obsolete_pages;
888 	struct list_head lpage_disallowed_mmu_pages;
889 	struct kvm_page_track_notifier_node mmu_sp_tracker;
890 	struct kvm_page_track_notifier_head track_notifier_head;
891 
892 	struct list_head assigned_dev_head;
893 	struct iommu_domain *iommu_domain;
894 	bool iommu_noncoherent;
895 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
896 	atomic_t noncoherent_dma_count;
897 #define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
898 	atomic_t assigned_device_count;
899 	struct kvm_pic *vpic;
900 	struct kvm_ioapic *vioapic;
901 	struct kvm_pit *vpit;
902 	atomic_t vapics_in_nmi_mode;
903 	struct mutex apic_map_lock;
904 	struct kvm_apic_map *apic_map;
905 
906 	bool apic_access_page_done;
907 
908 	gpa_t wall_clock;
909 
910 	bool mwait_in_guest;
911 	bool hlt_in_guest;
912 	bool pause_in_guest;
913 	bool cstate_in_guest;
914 
915 	unsigned long irq_sources_bitmap;
916 	s64 kvmclock_offset;
917 	raw_spinlock_t tsc_write_lock;
918 	u64 last_tsc_nsec;
919 	u64 last_tsc_write;
920 	u32 last_tsc_khz;
921 	u64 cur_tsc_nsec;
922 	u64 cur_tsc_write;
923 	u64 cur_tsc_offset;
924 	u64 cur_tsc_generation;
925 	int nr_vcpus_matched_tsc;
926 
927 	spinlock_t pvclock_gtod_sync_lock;
928 	bool use_master_clock;
929 	u64 master_kernel_ns;
930 	u64 master_cycle_now;
931 	struct delayed_work kvmclock_update_work;
932 	struct delayed_work kvmclock_sync_work;
933 
934 	struct kvm_xen_hvm_config xen_hvm_config;
935 
936 	/* reads protected by irq_srcu, writes by irq_lock */
937 	struct hlist_head mask_notifier_list;
938 
939 	struct kvm_hv hyperv;
940 
941 	#ifdef CONFIG_KVM_MMU_AUDIT
942 	int audit_point;
943 	#endif
944 
945 	bool backwards_tsc_observed;
946 	bool boot_vcpu_runs_old_kvmclock;
947 	u32 bsp_vcpu_id;
948 
949 	u64 disabled_quirks;
950 
951 	enum kvm_irqchip_mode irqchip_mode;
952 	u8 nr_reserved_ioapic_pins;
953 
954 	bool disabled_lapic_found;
955 
956 	bool x2apic_format;
957 	bool x2apic_broadcast_quirk_disabled;
958 
959 	bool guest_can_read_msr_platform_info;
960 	bool exception_payload_enabled;
961 
962 	struct kvm_pmu_event_filter *pmu_event_filter;
963 	struct task_struct *nx_lpage_recovery_thread;
964 };
965 
966 struct kvm_vm_stat {
967 	ulong mmu_shadow_zapped;
968 	ulong mmu_pte_write;
969 	ulong mmu_pte_updated;
970 	ulong mmu_pde_zapped;
971 	ulong mmu_flooded;
972 	ulong mmu_recycled;
973 	ulong mmu_cache_miss;
974 	ulong mmu_unsync;
975 	ulong remote_tlb_flush;
976 	ulong lpages;
977 	ulong nx_lpage_splits;
978 	ulong max_mmu_page_hash_collisions;
979 };
980 
981 struct kvm_vcpu_stat {
982 	u64 pf_fixed;
983 	u64 pf_guest;
984 	u64 tlb_flush;
985 	u64 invlpg;
986 
987 	u64 exits;
988 	u64 io_exits;
989 	u64 mmio_exits;
990 	u64 signal_exits;
991 	u64 irq_window_exits;
992 	u64 nmi_window_exits;
993 	u64 l1d_flush;
994 	u64 halt_exits;
995 	u64 halt_successful_poll;
996 	u64 halt_attempted_poll;
997 	u64 halt_poll_invalid;
998 	u64 halt_wakeup;
999 	u64 request_irq_exits;
1000 	u64 irq_exits;
1001 	u64 host_state_reload;
1002 	u64 fpu_reload;
1003 	u64 insn_emulation;
1004 	u64 insn_emulation_fail;
1005 	u64 hypercalls;
1006 	u64 irq_injections;
1007 	u64 nmi_injections;
1008 	u64 req_event;
1009 };
1010 
1011 struct x86_instruction_info;
1012 
1013 struct msr_data {
1014 	bool host_initiated;
1015 	u32 index;
1016 	u64 data;
1017 };
1018 
1019 struct kvm_lapic_irq {
1020 	u32 vector;
1021 	u16 delivery_mode;
1022 	u16 dest_mode;
1023 	bool level;
1024 	u16 trig_mode;
1025 	u32 shorthand;
1026 	u32 dest_id;
1027 	bool msi_redir_hint;
1028 };
1029 
1030 static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
1031 {
1032 	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
1033 }
1034 
1035 struct kvm_x86_ops {
1036 	int (*cpu_has_kvm_support)(void);          /* __init */
1037 	int (*disabled_by_bios)(void);             /* __init */
1038 	int (*hardware_enable)(void);
1039 	void (*hardware_disable)(void);
1040 	int (*check_processor_compatibility)(void);/* __init */
1041 	int (*hardware_setup)(void);               /* __init */
1042 	void (*hardware_unsetup)(void);            /* __exit */
1043 	bool (*cpu_has_accelerated_tpr)(void);
1044 	bool (*has_emulated_msr)(int index);
1045 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
1046 
1047 	struct kvm *(*vm_alloc)(void);
1048 	void (*vm_free)(struct kvm *);
1049 	int (*vm_init)(struct kvm *kvm);
1050 	void (*vm_destroy)(struct kvm *kvm);
1051 
1052 	/* Create, but do not attach this VCPU */
1053 	int (*vcpu_create)(struct kvm_vcpu *vcpu);
1054 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
1055 	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
1056 
1057 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
1058 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
1059 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
1060 
1061 	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
1062 	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1063 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1064 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
1065 	void (*get_segment)(struct kvm_vcpu *vcpu,
1066 			    struct kvm_segment *var, int seg);
1067 	int (*get_cpl)(struct kvm_vcpu *vcpu);
1068 	void (*set_segment)(struct kvm_vcpu *vcpu,
1069 			    struct kvm_segment *var, int seg);
1070 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
1071 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
1072 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
1073 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1074 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
1075 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1076 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
1077 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1078 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1079 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1080 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1081 	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
1082 	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
1083 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
1084 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
1085 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
1086 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
1087 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
1088 
1089 	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
1090 	int  (*tlb_remote_flush)(struct kvm *kvm);
1091 	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
1092 			struct kvm_tlb_range *range);
1093 
1094 	/*
1095 	 * Flush any TLB entries associated with the given GVA.
1096 	 * Does not need to flush GPA->HPA mappings.
1097 	 * Can potentially get non-canonical addresses through INVLPGs, which
1098 	 * the implementation may choose to ignore if appropriate.
1099 	 */
1100 	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
1101 
1102 	void (*run)(struct kvm_vcpu *vcpu);
1103 	int (*handle_exit)(struct kvm_vcpu *vcpu,
1104 		enum exit_fastpath_completion exit_fastpath);
1105 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
1106 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
1107 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
1108 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
1109 				unsigned char *hypercall_addr);
1110 	void (*set_irq)(struct kvm_vcpu *vcpu);
1111 	void (*set_nmi)(struct kvm_vcpu *vcpu);
1112 	void (*queue_exception)(struct kvm_vcpu *vcpu);
1113 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
1114 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
1115 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
1116 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
1117 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
1118 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
1119 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
1120 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
1121 	bool (*get_enable_apicv)(struct kvm *kvm);
1122 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
1123 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
1124 	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
1125 	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
1126 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
1127 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
1128 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
1129 	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
1130 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
1131 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
1132 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
1133 	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
1134 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1135 	int (*get_lpage_level)(void);
1136 	bool (*rdtscp_supported)(void);
1137 	bool (*invpcid_supported)(void);
1138 
1139 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
1140 
1141 	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
1142 
1143 	bool (*has_wbinvd_exit)(void);
1144 
1145 	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
1146 	/* Returns actual tsc_offset set in active VMCS */
1147 	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
1148 
1149 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
1150 
1151 	int (*check_intercept)(struct kvm_vcpu *vcpu,
1152 			       struct x86_instruction_info *info,
1153 			       enum x86_intercept_stage stage);
1154 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
1155 		enum exit_fastpath_completion *exit_fastpath);
1156 	bool (*mpx_supported)(void);
1157 	bool (*xsaves_supported)(void);
1158 	bool (*umip_emulated)(void);
1159 	bool (*pt_supported)(void);
1160 	bool (*pku_supported)(void);
1161 
1162 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
1163 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
1164 
1165 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
1166 
1167 	/*
1168 	 * Arch-specific dirty logging hooks. These hooks are only supposed to
1169 	 * be valid if the specific arch has hardware-accelerated dirty logging
1170 	 * mechanism. Currently only for PML on VMX.
1171 	 *
1172 	 *  - slot_enable_log_dirty:
1173 	 *	called when enabling log dirty mode for the slot.
1174 	 *  - slot_disable_log_dirty:
1175 	 *	called when disabling log dirty mode for the slot.
1176 	 *	also called when slot is created with log dirty disabled.
1177 	 *  - flush_log_dirty:
1178 	 *	called before reporting dirty_bitmap to userspace.
1179 	 *  - enable_log_dirty_pt_masked:
1180 	 *	called when reenabling log dirty for the GFNs in the mask after
1181 	 *	corresponding bits are cleared in slot->dirty_bitmap.
1182 	 */
1183 	void (*slot_enable_log_dirty)(struct kvm *kvm,
1184 				      struct kvm_memory_slot *slot);
1185 	void (*slot_disable_log_dirty)(struct kvm *kvm,
1186 				       struct kvm_memory_slot *slot);
1187 	void (*flush_log_dirty)(struct kvm *kvm);
1188 	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
1189 					   struct kvm_memory_slot *slot,
1190 					   gfn_t offset, unsigned long mask);
1191 	int (*write_log_dirty)(struct kvm_vcpu *vcpu);
1192 
1193 	/* pmu operations of sub-arch */
1194 	const struct kvm_pmu_ops *pmu_ops;
1195 
1196 	/*
1197 	 * Architecture specific hooks for vCPU blocking due to
1198 	 * HLT instruction.
1199 	 * Returns for .pre_block():
1200 	 *    - 0 means continue to block the vCPU.
1201 	 *    - 1 means we cannot block the vCPU since some event
1202 	 *        happens during this period, such as, 'ON' bit in
1203 	 *        posted-interrupts descriptor is set.
1204 	 */
1205 	int (*pre_block)(struct kvm_vcpu *vcpu);
1206 	void (*post_block)(struct kvm_vcpu *vcpu);
1207 
1208 	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
1209 	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
1210 
1211 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
1212 			      uint32_t guest_irq, bool set);
1213 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
1214 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
1215 
1216 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
1217 			    bool *expired);
1218 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
1219 
1220 	void (*setup_mce)(struct kvm_vcpu *vcpu);
1221 
1222 	int (*get_nested_state)(struct kvm_vcpu *vcpu,
1223 				struct kvm_nested_state __user *user_kvm_nested_state,
1224 				unsigned user_data_size);
1225 	int (*set_nested_state)(struct kvm_vcpu *vcpu,
1226 				struct kvm_nested_state __user *user_kvm_nested_state,
1227 				struct kvm_nested_state *kvm_state);
1228 	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
1229 
1230 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
1231 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
1232 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
1233 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
1234 
1235 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
1236 	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1237 	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1238 
1239 	int (*get_msr_feature)(struct kvm_msr_entry *entry);
1240 
1241 	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
1242 				   uint16_t *vmcs_version);
1243 	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
1244 
1245 	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
1246 
1247 	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
1248 	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
1249 };
1250 
1251 struct kvm_arch_async_pf {
1252 	u32 token;
1253 	gfn_t gfn;
1254 	unsigned long cr3;
1255 	bool direct_map;
1256 };
1257 
1258 extern struct kvm_x86_ops *kvm_x86_ops;
1259 extern struct kmem_cache *x86_fpu_cache;
1260 
1261 #define __KVM_HAVE_ARCH_VM_ALLOC
1262 static inline struct kvm *kvm_arch_alloc_vm(void)
1263 {
1264 	return kvm_x86_ops->vm_alloc();
1265 }
1266 
1267 static inline void kvm_arch_free_vm(struct kvm *kvm)
1268 {
1269 	return kvm_x86_ops->vm_free(kvm);
1270 }
1271 
1272 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
1273 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
1274 {
1275 	if (kvm_x86_ops->tlb_remote_flush &&
1276 	    !kvm_x86_ops->tlb_remote_flush(kvm))
1277 		return 0;
1278 	else
1279 		return -ENOTSUPP;
1280 }
1281 
1282 int kvm_mmu_module_init(void);
1283 void kvm_mmu_module_exit(void);
1284 
1285 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
1286 int kvm_mmu_create(struct kvm_vcpu *vcpu);
1287 void kvm_mmu_init_vm(struct kvm *kvm);
1288 void kvm_mmu_uninit_vm(struct kvm *kvm);
1289 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
1290 		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
1291 		u64 acc_track_mask, u64 me_mask);
1292 
1293 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1294 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
1295 				      struct kvm_memory_slot *memslot);
1296 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
1297 				   const struct kvm_memory_slot *memslot);
1298 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
1299 				   struct kvm_memory_slot *memslot);
1300 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
1301 					struct kvm_memory_slot *memslot);
1302 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
1303 			    struct kvm_memory_slot *memslot);
1304 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1305 				   struct kvm_memory_slot *slot,
1306 				   gfn_t gfn_offset, unsigned long mask);
1307 void kvm_mmu_zap_all(struct kvm *kvm);
1308 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1309 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
1310 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
1311 
1312 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1313 bool pdptrs_changed(struct kvm_vcpu *vcpu);
1314 
1315 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1316 			  const void *val, int bytes);
1317 
1318 struct kvm_irq_mask_notifier {
1319 	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
1320 	int irq;
1321 	struct hlist_node link;
1322 };
1323 
1324 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
1325 				    struct kvm_irq_mask_notifier *kimn);
1326 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
1327 				      struct kvm_irq_mask_notifier *kimn);
1328 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
1329 			     bool mask);
1330 
1331 extern bool tdp_enabled;
1332 
1333 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
1334 
1335 /* control of guest tsc rate supported? */
1336 extern bool kvm_has_tsc_control;
1337 /* maximum supported tsc_khz for guests */
1338 extern u32  kvm_max_guest_tsc_khz;
1339 /* number of bits of the fractional part of the TSC scaling ratio */
1340 extern u8   kvm_tsc_scaling_ratio_frac_bits;
1341 /* maximum allowed value of TSC scaling ratio */
1342 extern u64  kvm_max_tsc_scaling_ratio;
1343 /* 1ull << kvm_tsc_scaling_ratio_frac_bits */
1344 extern u64  kvm_default_tsc_scaling_ratio;
1345 
1346 extern u64 kvm_mce_cap_supported;
1347 
1348 /*
1349  * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
1350  *			userspace I/O) to indicate that the emulation context
1351  *			should be resued as is, i.e. skip initialization of
1352  *			emulation context, instruction fetch and decode.
1353  *
1354  * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
1355  *		      Indicates that only select instructions (tagged with
1356  *		      EmulateOnUD) should be emulated (to minimize the emulator
1357  *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
1358  *
1359  * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
1360  *		   decode the instruction length.  For use *only* by
1361  *		   kvm_x86_ops->skip_emulated_instruction() implementations.
1362  *
1363  * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
1364  *			  retry native execution under certain conditions.
1365  *
1366  * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
1367  *			     triggered by KVM's magic "force emulation" prefix,
1368  *			     which is opt in via module param (off by default).
1369  *			     Bypasses EmulateOnUD restriction despite emulating
1370  *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
1371  *			     Used to test the full emulator from userspace.
1372  *
1373  * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
1374  *			backdoor emulation, which is opt in via module param.
1375  *			VMware backoor emulation handles select instructions
1376  *			and reinjects the #GP for all other cases.
1377  */
1378 #define EMULTYPE_NO_DECODE	    (1 << 0)
1379 #define EMULTYPE_TRAP_UD	    (1 << 1)
1380 #define EMULTYPE_SKIP		    (1 << 2)
1381 #define EMULTYPE_ALLOW_RETRY	    (1 << 3)
1382 #define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
1383 #define EMULTYPE_VMWARE_GP	    (1 << 5)
1384 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
1385 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
1386 					void *insn, int insn_len);
1387 
1388 void kvm_enable_efer_bits(u64);
1389 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
1390 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
1391 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
1392 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
1393 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
1394 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
1395 
1396 struct x86_emulate_ctxt;
1397 
1398 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
1399 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
1400 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
1401 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1402 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
1403 
1404 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
1405 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
1406 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
1407 
1408 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
1409 		    int reason, bool has_error_code, u32 error_code);
1410 
1411 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
1412 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
1413 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1414 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
1415 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
1416 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
1417 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
1418 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
1419 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
1420 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
1421 
1422 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1423 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1424 
1425 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
1426 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
1427 bool kvm_rdpmc(struct kvm_vcpu *vcpu);
1428 
1429 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1430 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1431 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1432 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1433 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
1434 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1435 			    gfn_t gfn, void *data, int offset, int len,
1436 			    u32 access);
1437 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
1438 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
1439 
1440 static inline int __kvm_irq_line_state(unsigned long *irq_state,
1441 				       int irq_source_id, int level)
1442 {
1443 	/* Logical OR for level trig interrupt */
1444 	if (level)
1445 		__set_bit(irq_source_id, irq_state);
1446 	else
1447 		__clear_bit(irq_source_id, irq_state);
1448 
1449 	return !!(*irq_state);
1450 }
1451 
1452 #define KVM_MMU_ROOT_CURRENT		BIT(0)
1453 #define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
1454 #define KVM_MMU_ROOTS_ALL		(~0UL)
1455 
1456 int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
1457 void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
1458 
1459 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
1460 
1461 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1462 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
1463 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
1464 int kvm_mmu_load(struct kvm_vcpu *vcpu);
1465 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
1466 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
1467 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1468 			ulong roots_to_free);
1469 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1470 			   struct x86_exception *exception);
1471 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
1472 			      struct x86_exception *exception);
1473 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
1474 			       struct x86_exception *exception);
1475 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
1476 			       struct x86_exception *exception);
1477 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
1478 				struct x86_exception *exception);
1479 
1480 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
1481 
1482 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
1483 
1484 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
1485 		       void *insn, int insn_len);
1486 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
1487 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
1488 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
1489 
1490 void kvm_enable_tdp(void);
1491 void kvm_disable_tdp(void);
1492 
1493 static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1494 				  struct x86_exception *exception)
1495 {
1496 	return gpa;
1497 }
1498 
1499 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
1500 {
1501 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
1502 
1503 	return (struct kvm_mmu_page *)page_private(page);
1504 }
1505 
1506 static inline u16 kvm_read_ldt(void)
1507 {
1508 	u16 ldt;
1509 	asm("sldt %0" : "=g"(ldt));
1510 	return ldt;
1511 }
1512 
1513 static inline void kvm_load_ldt(u16 sel)
1514 {
1515 	asm("lldt %0" : : "rm"(sel));
1516 }
1517 
1518 #ifdef CONFIG_X86_64
1519 static inline unsigned long read_msr(unsigned long msr)
1520 {
1521 	u64 value;
1522 
1523 	rdmsrl(msr, value);
1524 	return value;
1525 }
1526 #endif
1527 
1528 static inline u32 get_rdx_init_val(void)
1529 {
1530 	return 0x600; /* P6 family */
1531 }
1532 
1533 static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
1534 {
1535 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1536 }
1537 
1538 #define TSS_IOPB_BASE_OFFSET 0x66
1539 #define TSS_BASE_SIZE 0x68
1540 #define TSS_IOPB_SIZE (65536 / 8)
1541 #define TSS_REDIRECTION_SIZE (256 / 8)
1542 #define RMODE_TSS_SIZE							\
1543 	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
1544 
1545 enum {
1546 	TASK_SWITCH_CALL = 0,
1547 	TASK_SWITCH_IRET = 1,
1548 	TASK_SWITCH_JMP = 2,
1549 	TASK_SWITCH_GATE = 3,
1550 };
1551 
1552 #define HF_GIF_MASK		(1 << 0)
1553 #define HF_HIF_MASK		(1 << 1)
1554 #define HF_VINTR_MASK		(1 << 2)
1555 #define HF_NMI_MASK		(1 << 3)
1556 #define HF_IRET_MASK		(1 << 4)
1557 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
1558 #define HF_SMM_MASK		(1 << 6)
1559 #define HF_SMM_INSIDE_NMI_MASK	(1 << 7)
1560 
1561 #define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
1562 #define KVM_ADDRESS_SPACE_NUM 2
1563 
1564 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
1565 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
1566 
1567 asmlinkage void kvm_spurious_fault(void);
1568 
1569 /*
1570  * Hardware virtualization extension instructions may fault if a
1571  * reboot turns off virtualization while processes are running.
1572  * Usually after catching the fault we just panic; during reboot
1573  * instead the instruction is ignored.
1574  */
1575 #define __kvm_handle_fault_on_reboot(insn)				\
1576 	"666: \n\t"							\
1577 	insn "\n\t"							\
1578 	"jmp	668f \n\t"						\
1579 	"667: \n\t"							\
1580 	"call	kvm_spurious_fault \n\t"				\
1581 	"668: \n\t"							\
1582 	_ASM_EXTABLE(666b, 667b)
1583 
1584 #define KVM_ARCH_WANT_MMU_NOTIFIER
1585 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
1586 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
1587 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
1588 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
1589 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
1590 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
1591 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
1592 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
1593 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
1594 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
1595 
1596 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
1597 		    unsigned long ipi_bitmap_high, u32 min,
1598 		    unsigned long icr, int op_64_bit);
1599 
1600 void kvm_define_shared_msr(unsigned index, u32 msr);
1601 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1602 
1603 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
1604 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
1605 
1606 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
1607 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
1608 
1609 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
1610 void kvm_make_scan_ioapic_request(struct kvm *kvm);
1611 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
1612 				       unsigned long *vcpu_bitmap);
1613 
1614 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1615 				     struct kvm_async_pf *work);
1616 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1617 				 struct kvm_async_pf *work);
1618 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1619 			       struct kvm_async_pf *work);
1620 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
1621 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1622 
1623 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
1624 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
1625 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
1626 
1627 int kvm_is_in_guest(void);
1628 
1629 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
1630 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
1631 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
1632 
1633 bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
1634 			     struct kvm_vcpu **dest_vcpu);
1635 
1636 void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
1637 		     struct kvm_lapic_irq *irq);
1638 
1639 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
1640 {
1641 	/* We can only post Fixed and LowPrio IRQs */
1642 	return (irq->delivery_mode == dest_Fixed ||
1643 		irq->delivery_mode == dest_LowestPrio);
1644 }
1645 
1646 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
1647 {
1648 	if (kvm_x86_ops->vcpu_blocking)
1649 		kvm_x86_ops->vcpu_blocking(vcpu);
1650 }
1651 
1652 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
1653 {
1654 	if (kvm_x86_ops->vcpu_unblocking)
1655 		kvm_x86_ops->vcpu_unblocking(vcpu);
1656 }
1657 
1658 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
1659 
1660 static inline int kvm_cpu_get_apicid(int mps_cpu)
1661 {
1662 #ifdef CONFIG_X86_LOCAL_APIC
1663 	return default_cpu_present_to_apicid(mps_cpu);
1664 #else
1665 	WARN_ON_ONCE(1);
1666 	return BAD_APICID;
1667 #endif
1668 }
1669 
1670 #define put_smstate(type, buf, offset, val)                      \
1671 	*(type *)((buf) + (offset) - 0x7e00) = val
1672 
1673 #define GET_SMSTATE(type, buf, offset)		\
1674 	(*(type *)((buf) + (offset) - 0x7e00))
1675 
1676 #endif /* _ASM_X86_KVM_HOST_H */
1677