/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;
	/* bit mask of VM types */
	u32 supported_vm_types;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

struct kvm_host_values {
	/*
	 * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
	 * address bits irrespective of features that repurpose legal bits,
	 * e.g. MKTME.
	 */
	u8 maxphyaddr;

	u64 efer;
	u64 xcr0;
	u64 xss;
	u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
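
/*
 * Illustrative usage sketch (the check and the "supported" mask here are
 * hypothetical, not defined in this file): the macro traces the stringified
 * expression when the check fails, while still evaluating to the check's
 * boolean result, so callers can write:
 *
 *	if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(ctl & ~supported_ctls))
 *		return -EINVAL;
 */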

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
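
/*
 * Worked example (assuming the VMX defaults above): a modifier smaller than
 * the base selects the multiplicative/divisive path, so growing the default
 * window with modifier 2 doubles it and shrinking halves it again:
 *
 *	__grow_ple_window(4096, 4096, 2, UINT_MAX) == 8192
 *	__shrink_ple_window(8192, 4096, 2, 4096)   == 4096
 *
 * A modifier >= base is applied additively (or subtractively), and the result
 * is always clamped to the given max (or min).
 */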

#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
	       vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
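
/*
 * Example: with 4-level paging (CR4.LA57 = 0) an address is canonical iff
 * bits 63:48 are a sign-extension of bit 47, so 0xffff800000000000 is
 * canonical while 0x0000800000000000 is not.  With CR4.LA57 = 1 the
 * sign-extension boundary moves up to bit 56.
 */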

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva.  If gva is MMIO_GVA_ANY, clear
 * all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
		: "rm" (base), "0" (0), "1" ((u32) n));		\
	    n = __quot;						\
	    __rem;						\
	 })
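
/*
 * Worked example: do_shl32_div32(n, base) computes (n << 32) / base, so with
 * n = 3, base = 4 it leaves n == 0xc0000000 and returns remainder 0, and with
 * n = 1, base = 3 it leaves n == 0x55555555 and returns remainder 1.  As with
 * any 64/32 divl, the quotient must fit in 32 bits (i.e. base must be greater
 * than n), else the CPU raises #DE.
 */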

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values. */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
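
/*
 * Sketch of the bit trick above: each of the eight PAT entries occupies the
 * low 3 bits of a byte.  The first test rejects any byte with bits 7:3 set;
 * the second rejects the reserved memory types 2 and 3 (bit 1 set but bit 2
 * clear).  E.g. for a byte holding type 2 (0x02):
 *
 *	(0x02 | ((0x02 & 0x02) << 1)) == 0x06 != 0x02
 *
 * so the PAT value is rejected, whereas type 6 (0x06) passes unchanged.
 */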

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host.  We assume all the MSRs are already set
 * up by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want the
 * guest to be always treated like user space, no matter what context it used
 * internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation
 * encountered an error that should result in #GP in the guest, unless
 * userspace handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

/*
 * Compute the set of CR4 bits that must be treated as reserved, given a
 * feature-query predicate __cpu_has applied to context __c: every bit whose
 * enabling feature the predicate reports as unsupported is reserved.
 */
#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	u64 __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
		__reserved_bits |= X86_CR4_SMAP;	\
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
		__reserved_bits |= X86_CR4_FSGSBASE;	\
	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
		__reserved_bits |= X86_CR4_PKE;		\
	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
		__reserved_bits |= X86_CR4_LA57;	\
	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
		__reserved_bits |= X86_CR4_UMIP;	\
	if (!__cpu_has(__c, X86_FEATURE_VMX))		\
		__reserved_bits |= X86_CR4_VMXE;	\
	if (!__cpu_has(__c, X86_FEATURE_PCID))		\
		__reserved_bits |= X86_CR4_PCIDE;	\
	if (!__cpu_has(__c, X86_FEATURE_LAM))		\
		__reserved_bits |= X86_CR4_LAM_SUP;	\
	__reserved_bits;				\
})

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif