/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"

#define KVM_MAX_MCE_BANKS 32

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32  max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8   tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64  max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64  default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;
	/* bit mask of VM types */
	u32 supported_vm_types;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;

	u64 supported_quirks;
	u64 inapplicable_quirks;
};

struct kvm_host_values {
	/*
	 * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
	 * address bits irrespective of features that repurpose legal bits,
	 * e.g. MKTME.
	 */
	u8 maxphyaddr;

	u64 efer;
	u64 xcr0;
	u64 xss;
	u64 s_cet;
	u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define SIZE_OF_MEMSLOTS_HASHTABLE \
	(sizeof(((struct kvm_memslots *)0)->id_hash) * 2 * KVM_MAX_NR_ADDRESS_SPACES)

/* Sanity check the size of the memslot hash tables. */
static_assert(SIZE_OF_MEMSLOTS_HASHTABLE ==
	      (1024 * (1 + IS_ENABLED(CONFIG_X86_64)) * (1 + IS_ENABLED(CONFIG_KVM_SMM))));
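
/*
 * The arithmetic, spelled out (annotation; assumes struct kvm_memslots still
 * declares DECLARE_HASHTABLE(id_hash, 7)): 128 buckets of one pointer each is
 * 1024 bytes per table on 64-bit (512 on 32-bit), each address space carries
 * an active/inactive pair of memslot sets (the "* 2" above), and x86 has a
 * second address space only when SMM is supported.
 */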

/*
 * Assert that "struct kvm_{svm,vmx,tdx}" is an order-0 or order-1 allocation.
 * Spilling over to an order-2 allocation isn't fundamentally problematic, but
 * isn't expected to happen in the foreseeable future (O(years)).  Assert that
 * the size is an order-0 allocation when ignoring the memslot hash tables, to
 * help detect and debug unexpected size increases.
 */
#define KVM_SANITY_CHECK_VM_STRUCT_SIZE(x)						\
do {											\
	BUILD_BUG_ON(get_order(sizeof(struct x) - SIZE_OF_MEMSLOTS_HASHTABLE) &&	\
		     !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN));	\
	BUILD_BUG_ON(get_order(sizeof(struct x)) > 1 &&					\
		     !IS_ENABLED(CONFIG_DEBUG_KERNEL) && !IS_ENABLED(CONFIG_KASAN));	\
} while (0)
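
/*
 * Example usage (sketch; the vendor modules invoke the check once per VM
 * structure, e.g. from vmx.c):
 *
 *	KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_vmx);
 */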

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
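
/*
 * Example usage (sketch): the nested VMX/SVM code shortens the name and wraps
 * individual consistency checks, e.g.:
 *
 *	#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
 *
 *	if (CC(!vmx_control_verify(control, low, high)))
 *		return -EINVAL;
 */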

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

/*
 * KVM's internal, non-ABI indices for synthetic MSRs. The values themselves
 * are arbitrary and have no meaning, the only requirement is that they don't
 * conflict with "real" MSRs that KVM supports. Use values at the upper end
 * of KVM's reserved paravirtual MSR range to minimize churn, i.e. these values
 * will be usable until KVM exhausts its supply of paravirtual MSR indices.
 */

#define MSR_KVM_INTERNAL_GUEST_SSP	0x4b564dff

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
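
/*
 * Worked example (annotation): with the VMX defaults above, a window of 4096
 * grows to min(4096 * 2, UINT_MAX) = 8192, since the grow modifier (2) is
 * less than the base (4096) and therefore multiplies; the shrink modifier of
 * 0 makes __shrink_ple_window() reset the window straight back to the base.
 */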

#define MSR_IA32_CR_PAT_DEFAULT	\
	PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)
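
/*
 * Note (annotation): with the architectural PAT encodings (WB=6, WT=4,
 * UC-=7, UC=0), this expands to 0x0007040600070406ULL, the power-on default
 * value of IA32_PAT.
 */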

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

/* Forcibly leave nested mode, e.g. on a vCPU reset. */
static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.nested_ops->leave_nested(vcpu);
}

/*
 * If IBRS is advertised to the vCPU, KVM must flush the indirect branch
 * predictors when transitioning from L2 to L1, as L1 expects hardware (KVM in
 * this case) to provide separate predictor modes.  Bare metal isolates the host
 * from the guest, but doesn't isolate different guests from one another (in
 * this case L1 and L2).  The exception is if bare metal supports same mode
 * IBRS, which offers protection within the same mode, and hence protects L1
 * from L2.
 */
static inline void kvm_nested_vmexit_handle_ibrs(struct kvm_vcpu *vcpu)
{
	if (cpu_feature_enabled(X86_FEATURE_AMD_IBRS_SAME_MODE))
		return;

	if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
	    guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS))
		indirect_branch_prediction_barrier();
}

/*
 * Disallow modifying CPUID and feature MSRs, which affect the core virtual CPU
 * model exposed to the guest and virtualized by KVM, if the vCPU has already
 * run or is in guest mode (L2).  In both cases, KVM has already consumed the
 * current virtual CPU model, and doesn't support "unwinding" to react to the
 * new model.
 *
 * Note, the only way is_guest_mode() can be true with 'last_vmentry_cpu == -1'
 * is if userspace sets CPUID and feature MSRs (to enable VMX/SVM), then sets
 * nested state, and then attempts to set CPUID and/or feature MSRs *again*.
 */
static inline bool kvm_can_set_cpuid_and_feature_msrs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu == -1 && !is_guest_mode(vcpu);
}

/*
 * WARN if a nested VM-Enter is pending completion, and userspace hasn't gained
 * control since the nested VM-Enter was initiated (in which case, userspace
 * may have modified vCPU state to induce an architecturally invalid VM-Exit).
 */
static inline void kvm_warn_on_nested_run_pending(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.nested_run_pending == KVM_NESTED_RUN_PENDING);
}

static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
{
	vcpu->arch.mp_state = mp_state;
	if (mp_state == KVM_MP_STATE_RUNNABLE)
		vcpu->arch.pv.pv_unhalted = false;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}
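
/*
 * Example (annotation): #PF (vector 14) pushes an error code, so
 * x86_exception_has_error_code(PF_VECTOR) is true, while #UD (vector 6)
 * doesn't and yields false.
 */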

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 max_host_virt_addr_bits(void)
{
	return kvm_cpu_cap_has(X86_FEATURE_LA57) ? 57 : 48;
}

/*
 * x86 MSRs that hold linear addresses, x86 hidden segment bases, and IDT/GDT
 * bases have static canonicality checks, whose width depends only on the
 * CPU's support for 5-level paging, not on the state of CR4.LA57.  This
 * applies both to WRMSR and to other instructions that set their values,
 * e.g. SGDT.
 *
 * KVM passes through most of these MSRs and also doesn't intercept the
 * instructions that set the hidden segment bases.
 *
 * Therefore, to be consistent with hardware, perform canonicality checks
 * based on *host* support for 5-level paging even if the guest doesn't have
 * LA57 enabled in its CPUID.
 *
 * Finally, instructions that invalidate the MMU mappings for a given linear
 * address have a similar static canonicality check.  This allows, for
 * example, invalidating 5-level addresses of a guest from a host that uses
 * 4-level paging.
 */
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu,
					   unsigned int flags)
{
	if (flags & (X86EMUL_F_INVLPG | X86EMUL_F_MSR | X86EMUL_F_DT_LOAD))
		return !__is_canonical_address(la, max_host_virt_addr_bits());
	else
		return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline bool is_noncanonical_msr_address(u64 la, struct kvm_vcpu *vcpu)
{
	return is_noncanonical_address(la, vcpu, X86EMUL_F_MSR);
}

static inline bool is_noncanonical_base_address(u64 la, struct kvm_vcpu *vcpu)
{
	return is_noncanonical_address(la, vcpu, X86EMUL_F_DT_LOAD);
}

static inline bool is_noncanonical_invlpg_address(u64 la, struct kvm_vcpu *vcpu)
{
	return is_noncanonical_address(la, vcpu, X86EMUL_F_INVLPG);
}
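
/*
 * Example (annotation): MSR emulation for an address-holding MSR such as
 * MSR_IA32_SYSENTER_EIP uses the host-width check, e.g.:
 *
 *	if (is_noncanonical_msr_address(data, vcpu))
 *		return 1;
 */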

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually an nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}
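
/*
 * Example usage (sketch): guarding legacy behavior on a quirk that userspace
 * may have disabled, e.g.:
 *
 *	if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
 *		<emulate the legacy behavior>;
 */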

static __always_inline void kvm_request_l1tf_flush_l1d(void)
{
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
	/*
	 * Use a raw write to set the per-CPU flag, as KVM will ensure a flush
	 * even if preemption is currently enabled.  If the current vCPU task
	 * is migrated to a different CPU (or userspace runs the vCPU on a
	 * different task) before the next VM-Entry, then kvm_arch_vcpu_load()
	 * will request a flush on the new CPU.
	 */
	raw_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
#endif
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);
int kvm_guest_time_update(struct kvm_vcpu *v);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu);
fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);
fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu);

extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;

extern bool enable_pmu;
extern bool enable_mediated_pmu;

void kvm_setup_xss_caps(void);

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}
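
/*
 * Example (annotation): if the process hasn't been granted AMX permission,
 * e.g. via prctl(ARCH_REQ_XCOMP_GUEST_PERM, XFEATURE_XTILEDATA), the filtered
 * mask drops XTILE_DATA, and the logic above then drops XTILE_CFG as well, so
 * AMX is hidden entirely rather than advertised half-configured.
 */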

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/*
 * Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
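
/*
 * Worked example (annotation): with n = 1 and base = 3, do_shl32_div32()
 * computes (1 << 32) / 3, leaving the quotient 1431655765 (0x55555555) in n
 * and returning the remainder 1.  Note the quotient must fit in 32 bits, or
 * the DIV instruction will fault.
 */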

static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
{
	kvm->arch.disabled_exits |= mask;
}

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
}

static inline bool kvm_aperfmperf_in_guest(struct kvm *kvm)
{
	return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_APERFMPERF;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
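
/*
 * How the bit trick works (annotation): after the reserved high bits of each
 * byte are rejected, the only illegal encodings are 2 and 3, i.e. bit 1 set
 * with bit 2 clear.  Propagating bit 1 into bit 2 and comparing against the
 * original value catches exactly those bytes.
 */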

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host.  We assume all the MSRs are already set
 * up by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

int kvm_spec_ctrl_test_value(u64 value);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

enum kvm_msr_access {
	MSR_TYPE_R	= BIT(0),
	MSR_TYPE_W	= BIT(1),
	MSR_TYPE_RW	= MSR_TYPE_R | MSR_TYPE_W,
};

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace handles it.
 * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
 * as part of KVM's lightly documented internal KVM_RUN return codes.
 *
 * UNSUPPORTED	- The MSR isn't supported, either because it is completely
 *		  unknown to KVM, or because the MSR should not exist according
 *		  to the vCPU model.
 *
 * FILTERED	- Access to the MSR is denied by a userspace MSR filter.
 */
#define  KVM_MSR_RET_UNSUPPORTED	2
#define  KVM_MSR_RET_FILTERED		3

static inline bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return !(cr4 & vcpu->arch.cr4_guest_rsvd_bits);
}

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	if (!__cpu_has(__c, X86_FEATURE_LAM))           \
		__reserved_bits |= X86_CR4_LAM_SUP;     \
	if (!__cpu_has(__c, X86_FEATURE_SHSTK) &&       \
	    !__cpu_has(__c, X86_FEATURE_IBT))           \
		__reserved_bits |= X86_CR4_CET;         \
	__reserved_bits;                                \
})
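
/*
 * Example usage (sketch): x86.c evaluates this against KVM's own capability
 * checks via a thin adapter macro, e.g.:
 *
 *	#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
 *	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
 */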

int kvm_sev_es_mmio(struct kvm_vcpu *vcpu, bool is_write, gpa_t gpa,
		    unsigned int bytes, void *data);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

static inline void __kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
						    gpa_t gpa, unsigned int len,
						    const void *data,
						    bool is_write)
{
	struct kvm_run *run = vcpu->run;

	KVM_BUG_ON(len > 8, vcpu->kvm);

	run->mmio.len = len;
	run->mmio.is_write = is_write;
	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = gpa;
	if (is_write)
		memcpy(run->mmio.data, data, len);
}

static inline void kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
						  struct kvm_mmio_fragment *frag)
{
	WARN_ON_ONCE(!vcpu->mmio_needed || !vcpu->mmio_nr_fragments);

	__kvm_prepare_emulated_mmio_exit(vcpu, frag->gpa, min(8u, frag->len),
					 frag->data, vcpu->mmio_is_write);
}

static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
{
	return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
}

int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
			      int (*complete_hypercall)(struct kvm_vcpu *));

#define __kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall)			\
({										\
	int __ret;								\
	__ret = ____kvm_emulate_hypercall(_vcpu, cpl, complete_hypercall);	\
										\
	if (__ret > 0)								\
		__ret = complete_hypercall(_vcpu);				\
	__ret;									\
})
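
/*
 * Example usage (sketch): the common unaccelerated path wires in a completion
 * callback that skips the emulated instruction, e.g.:
 *
 *	return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu),
 *				       complete_hypercall_exit);
 */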

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

#define CET_US_RESERVED_BITS		GENMASK(9, 6)
#define CET_US_SHSTK_MASK_BITS		GENMASK(1, 0)
#define CET_US_IBT_MASK_BITS		(GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
#define CET_US_LEGACY_BITMAP_BASE(data)	((data) >> 12)

static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
{
	if (data & CET_US_RESERVED_BITS)
		return false;
	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
	    (data & CET_US_SHSTK_MASK_BITS))
		return false;
	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) &&
	    (data & CET_US_IBT_MASK_BITS))
		return false;
	if (!IS_ALIGNED(CET_US_LEGACY_BITMAP_BASE(data), 4))
		return false;
	/* IBT can be suppressed iff the TRACKER isn't WAIT_ENDBR. */
	if ((data & CET_SUPPRESS) && (data & CET_WAIT_ENDBR))
		return false;

	return true;
}
#endif