xref: /linux/arch/x86/kvm/x86.h (revision bf2c3138ae3694d4687cbe451c774c288ae2ad06)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"

#define KVM_MAX_MCE_BANKS 32

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32  max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8   tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64  max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64  default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;
	/* bit mask of VM types */
	u32 supported_vm_types;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;

	u64 supported_quirks;
	u64 inapplicable_quirks;
};

struct kvm_host_values {
	/*
	 * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
	 * address bits irrespective of features that repurpose legal bits,
	 * e.g. MKTME.
	 */
	u8 maxphyaddr;

	u64 efer;
	u64 xcr0;
	u64 xss;
	u64 s_cet;
	u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define SIZE_OF_MEMSLOTS_HASHTABLE \
	(sizeof(((struct kvm_memslots *)0)->id_hash) * 2 * KVM_MAX_NR_ADDRESS_SPACES)

/* Sanity check the size of the memslot hash tables. */
static_assert(SIZE_OF_MEMSLOTS_HASHTABLE ==
	      (1024 * (1 + IS_ENABLED(CONFIG_X86_64)) * (1 + IS_ENABLED(CONFIG_KVM_SMM))));

/*
 * Assert that "struct kvm_{svm,vmx,tdx}" is an order-0 or order-1 allocation.
 * Spilling over to an order-2 allocation isn't fundamentally problematic, but
 * isn't expected to happen in the foreseeable future (O(years)).  Assert that
 * the size is an order-0 allocation when ignoring the memslot hash tables, to
 * help detect and debug unexpected size increases.
 */
#define KVM_SANITY_CHECK_VM_STRUCT_SIZE(x)						\
do {											\
	BUILD_BUG_ON(get_order(sizeof(struct x) - SIZE_OF_MEMSLOTS_HASHTABLE) &&	\
		     !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN));	\
	BUILD_BUG_ON(get_order(sizeof(struct x)) > 1 &&					\
		     !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN));	\
} while (0)

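/*
 * Evaluate a nested VM-Enter consistency check and, on failure, report the
 * failing expression via tracepoint.  Nested code typically aliases this to a
 * terse local name, e.g. CC(), at the call site.
 */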
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

/*
 * KVM's internal, non-ABI indices for synthetic MSRs. The values themselves
 * are arbitrary and have no meaning; the only requirement is that they don't
 * conflict with "real" MSRs that KVM supports. Use values at the upper end
 * of KVM's reserved paravirtual MSR range to minimize churn, i.e. these values
 * will be usable until KVM exhausts its supply of paravirtual MSR indices.
 */

#define MSR_KVM_INTERNAL_GUEST_SSP	0x4b564dff

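/*
 * Grow the PLE window.  A 'modifier' smaller than 'base' is treated as a
 * multiplicative growth factor, otherwise as an additive increment; zero
 * resets the window to 'base'.  The result is clamped to 'max'.
 */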
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

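/*
 * Shrink the PLE window, mirroring __grow_ple_window(): a 'modifier' smaller
 * than 'base' divides the window, otherwise it is subtracted; zero resets the
 * window to 'base'.  The result is floored at 'min'.
 */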
static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

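/*
 * The architectural power-on/reset value of IA32_PAT (0x0007040600070406),
 * with PAT entries PA0..PA7 encoded from the low byte up.
 */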
#define MSR_IA32_CR_PAT_DEFAULT	\
	PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

/* Forcibly leave nested mode in cases like a vCPU reset */
static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.nested_ops->leave_nested(vcpu);
}

/*
 * If IBRS is advertised to the vCPU, KVM must flush the indirect branch
 * predictors when transitioning from L2 to L1, as L1 expects hardware (KVM in
 * this case) to provide separate predictor modes.  Bare metal isolates the host
 * from the guest, but doesn't isolate different guests from one another (in
 * this case L1 and L2). The exception is if bare metal supports same mode IBRS,
 * which offers protection within the same mode, and hence protects L1 from L2.
 */
static inline void kvm_nested_vmexit_handle_ibrs(struct kvm_vcpu *vcpu)
{
	if (cpu_feature_enabled(X86_FEATURE_AMD_IBRS_SAME_MODE))
		return;

	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
	    guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS))
		indirect_branch_prediction_barrier();
}

/*
 * Disallow modifying CPUID and feature MSRs, which affect the core virtual CPU
 * model exposed to the guest and virtualized by KVM, if the vCPU has already
 * run or is in guest mode (L2).  In both cases, KVM has already consumed the
 * current virtual CPU model, and doesn't support "unwinding" to react to the
 * new model.
 *
 * Note, the only way is_guest_mode() can be true with 'last_vmentry_cpu == -1'
 * is if userspace sets CPUID and feature MSRs (to enable VMX/SVM), then sets
 * nested state, and then attempts to set CPUID and/or feature MSRs *again*.
 */
static inline bool kvm_can_set_cpuid_and_feature_msrs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu == -1 && !is_guest_mode(vcpu);
}

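/*
 * Note, transitioning to RUNNABLE clears any pending PV unhalt (set when
 * another vCPU kicks this one via KVM_HC_KICK_CPU), as the unhalt request
 * only needs to wake a halted vCPU.
 */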
static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
{
	vcpu->arch.mp_state = mp_state;
	if (mp_state == KVM_MP_STATE_RUNNABLE)
		vcpu->arch.pv.pv_unhalted = false;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

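/*
 * #BP and #OF are generated by the INT3 and INTO instructions respectively,
 * i.e. are software exceptions.
 */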
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible. The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 max_host_virt_addr_bits(void)
{
	return kvm_cpu_cap_has(X86_FEATURE_LA57) ? 57 : 48;
}

/*
 * x86 MSRs which contain linear addresses, x86 hidden segment bases, and
 * IDT/GDT bases have static canonicality checks, the size of which depends
 * only on the CPU's support for 5-level paging, rather than on the state of
 * CR4.LA57.  This applies both to WRMSR and to other instructions that set
 * their values, e.g. SGDT.
 *
 * KVM passes through most of these MSRs and also doesn't intercept the
 * instructions that set the hidden segment bases.
 *
 * Because of this, to be consistent with hardware, even if the guest doesn't
 * have LA57 enabled in its CPUID, perform canonicality checks based on *host*
 * support for 5-level paging.
 *
 * Finally, instructions related to MMU invalidation of a given linear address
 * also have a similar static canonicality check on the address.  This allows,
 * for example, invalidating 5-level addresses of a guest from a host that
 * uses 4-level paging.
 */
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu,
					   unsigned int flags)
{
	if (flags & (X86EMUL_F_INVLPG | X86EMUL_F_MSR | X86EMUL_F_DT_LOAD))
		return !__is_canonical_address(la, max_host_virt_addr_bits());
	else
		return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline bool is_noncanonical_msr_address(u64 la, struct kvm_vcpu *vcpu)
{
	return is_noncanonical_address(la, vcpu, X86EMUL_F_MSR);
}

static inline bool is_noncanonical_base_address(u64 la, struct kvm_vcpu *vcpu)
{
	return is_noncanonical_address(la, vcpu, X86EMUL_F_DT_LOAD);
}

static inline bool is_noncanonical_invlpg_address(u64 la, struct kvm_vcpu *vcpu)
{
	return is_noncanonical_address(la, vcpu, X86EMUL_F_INVLPG);
}

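/*
 * Cache the GVA, GFN and access rights of the most recent emulated MMIO
 * access so that a repeat access to the same page can skip the translation.
 * The cache isn't populated while a memslot update is in progress and is
 * implicitly invalidated when the memslot generation changes.
 */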
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

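/*
 * Read/write a GPR, truncating the value to 32 bits if the vCPU isn't in
 * 64-bit mode.
 */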
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

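/*
 * Check whether a KVM_X86_QUIRK_* is active for the VM, i.e. hasn't been
 * disabled, e.g. by userspace via KVM_CAP_DISABLE_QUIRKS2 or because the
 * quirk is inapplicable to the VM type.
 */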
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

static __always_inline void kvm_request_l1tf_flush_l1d(void)
{
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
	/*
	 * Use a raw write to set the per-CPU flag, as KVM will ensure a flush
	 * even if preemption is currently enabled.  If the current vCPU task
	 * is migrated to a different CPU (or userspace runs the vCPU on a
	 * different task) before the next VM-Entry, then kvm_arch_vcpu_load()
	 * will request a flush on the new CPU.
	 */
	raw_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
#endif
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);
int kvm_guest_time_update(struct kvm_vcpu *v);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu);
fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);
fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu);

extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;

extern bool enable_pmu;
extern bool enable_mediated_pmu;

void kvm_setup_xss_caps(void);

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

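/*
 * Track which intercepts userspace has turned off via KVM_CAP_X86_DISABLE_EXITS,
 * i.e. which of MWAIT/HLT/PAUSE/C-state (and APERF/MPERF) accesses are allowed
 * to execute in the guest without a VM-Exit.
 */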
static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
{
	kvm->arch.disabled_exits |= mask;
}

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
}

static inline bool kvm_aperfmperf_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_APERFMPERF;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

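/*
 * Each PAT entry occupies the low three bits of its byte; memory types 2 and
 * 3 are reserved, i.e. an entry with bit 1 set is valid only if bit 2 is also
 * set.
 */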
static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}
static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

int kvm_spec_ctrl_test_value(u64 value);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

enum kvm_msr_access {
	MSR_TYPE_R	= BIT(0),
	MSR_TYPE_W	= BIT(1),
	MSR_TYPE_RW	= MSR_TYPE_R | MSR_TYPE_W,
};

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace handles it.
 * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
 * as part of KVM's lightly documented internal KVM_RUN return codes.
 *
 * UNSUPPORTED	- The MSR isn't supported, either because it is completely
 *		  unknown to KVM, or because the MSR should not exist according
 *		  to the vCPU model.
 *
 * FILTERED	- Access to the MSR is denied by a userspace MSR filter.
 */
#define  KVM_MSR_RET_UNSUPPORTED	2
#define  KVM_MSR_RET_FILTERED		3

static inline bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return !(cr4 & vcpu->arch.cr4_guest_rsvd_bits);
}

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	if (!__cpu_has(__c, X86_FEATURE_LAM))           \
		__reserved_bits |= X86_CR4_LAM_SUP;     \
	if (!__cpu_has(__c, X86_FEATURE_SHSTK) &&       \
	    !__cpu_has(__c, X86_FEATURE_IBT))           \
		__reserved_bits |= X86_CR4_CET;         \
	__reserved_bits;                                \
})

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data,  unsigned int count,
			 int in);

static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
{
	return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
}

int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
			      int (*complete_hypercall)(struct kvm_vcpu *));

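/*
 * Emulate a hypercall, invoking the completion callback (e.g. to skip the
 * hypercall instruction) only if ____kvm_emulate_hypercall() reports that the
 * hypercall was fully handled in KVM, i.e. not if KVM is exiting to userspace
 * to handle or complete the hypercall.
 */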
#define __kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall)			\
({										\
	int __ret;								\
	__ret = ____kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall);	\
										\
	if (__ret > 0)								\
		__ret = complete_hypercall(_vcpu);				\
	__ret;									\
})

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

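/*
 * Bit layout shared by MSR_IA32_U_CET and MSR_IA32_S_CET: bits 1:0 are the
 * shadow-stack controls, bits 5:2 and 63:10 are the IBT controls (with bits
 * 63:12 holding the legacy code page bitmap base), and bits 9:6 are reserved.
 */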
#define CET_US_RESERVED_BITS		GENMASK(9, 6)
#define CET_US_SHSTK_MASK_BITS		GENMASK(1, 0)
#define CET_US_IBT_MASK_BITS		(GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
#define CET_US_LEGACY_BITMAP_BASE(data)	((data) >> 12)

static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
{
	if (data & CET_US_RESERVED_BITS)
		return false;
	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
	    (data & CET_US_SHSTK_MASK_BITS))
		return false;
	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) &&
	    (data & CET_US_IBT_MASK_BITS))
		return false;
	if (!IS_ALIGNED(CET_US_LEGACY_BITMAP_BASE(data), 4))
		return false;
	/* IBT can be suppressed iff the TRACKER isn't WAIT_ENDBR. */
	if ((data & CET_SUPPRESS) && (data & CET_WAIT_ENDBR))
		return false;

	return true;
}
#endif