/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
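/*
 * Illustrative sketch, not part of the upstream header; the function name
 * is hypothetical. It shows how a vendor module might drive the helpers
 * above with the VMX defaults: because KVM_DEFAULT_PLE_WINDOW_GROW (2) is
 * smaller than the base, a grow doubles the current window (capped at the
 * max), and because KVM_DEFAULT_PLE_WINDOW_SHRINK is 0, a shrink resets
 * the window to the base value.
 */
static inline unsigned int example_vmx_ple_window_update(unsigned int old,
							 bool grow)
{
	if (grow)
		return __grow_ple_window(old, KVM_VMX_DEFAULT_PLE_WINDOW,
					 KVM_DEFAULT_PLE_WINDOW_GROW,
					 KVM_VMX_DEFAULT_PLE_WINDOW_MAX);

	return __shrink_ple_window(old, KVM_VMX_DEFAULT_PLE_WINDOW,
				   KVM_DEFAULT_PLE_WINDOW_SHRINK,
				   KVM_VMX_DEFAULT_PLE_WINDOW);
}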
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible. The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			  : "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
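/*
 * Illustrative sketch, not part of the upstream header; the function name
 * is hypothetical. do_shl32_div32() computes (n << 32) / base, so for
 * n < base the quotient is n/base as a 0.32 fixed-point fraction; here
 * 1/3 becomes 0x55555555 with a remainder of 1. Note that n must be less
 * than base, otherwise the 32-bit quotient overflows and the divl
 * instruction faults.
 */
static inline u32 example_fixed_point_one_third(void)
{
	u32 n = 1;

	do_shl32_div32(n, 3);	/* the quotient is written back into n */
	return n;		/* 0x55555555, ~1/3 in 0.32 fixed point */
}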
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
					enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
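/*
 * Illustrative self-test for kvm_pat_valid(), not part of the upstream
 * header; the function name is hypothetical. Each of the eight PAT
 * entries is a 3-bit memory type in its own byte, and only types 2 and 3
 * are reserved. The expression above catches them by OR-ing bit 1 of
 * each entry into bit 2: only for types 2 (0b010) and 3 (0b011) does
 * that change the value.
 */
static inline void example_pat_valid_checks(void)
{
	/* The power-on default PAT encoding is valid. */
	WARN_ON_ONCE(!kvm_pat_valid(MSR_IA32_CR_PAT_DEFAULT));
	/* Reserved memory type 2 in the lowest entry is rejected. */
	WARN_ON_ONCE(kvm_pat_valid(0x0007040600070402ull));
	/* Bits above the low 3 bits of any entry are rejected. */
	WARN_ON_ONCE(kvm_pat_valid(0x0007040600070408ull));
}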
static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	u64 __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
		__reserved_bits |= X86_CR4_SMAP;	\
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
		__reserved_bits |= X86_CR4_FSGSBASE;	\
	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
		__reserved_bits |= X86_CR4_PKE;		\
	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
		__reserved_bits |= X86_CR4_LA57;	\
	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
		__reserved_bits |= X86_CR4_UMIP;	\
	if (!__cpu_has(__c, X86_FEATURE_VMX))		\
		__reserved_bits |= X86_CR4_VMXE;	\
	if (!__cpu_has(__c, X86_FEATURE_PCID))		\
		__reserved_bits |= X86_CR4_PCIDE;	\
	__reserved_bits;				\
})
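/*
 * Illustrative sketch, not part of the upstream header; the function name
 * is hypothetical. The first macro argument is a "has this feature?"
 * predicate, so the same bit computation can be applied to host CPUID
 * (cpu_has() on a struct cpuinfo_x86) or to guest CPUID (guest_cpuid_has()
 * on a vCPU), which is how KVM derives both host and per-vCPU CR4
 * reserved-bit masks.
 */
static inline u64 example_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
{
	return __cr4_reserved_bits(cpu_has, c);
}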
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif