/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
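
/*
 * Illustrative example with the VMX defaults above: growing from
 * KVM_VMX_DEFAULT_PLE_WINDOW (4096) with the default grow modifier of 2
 * scales the window to 8192 (the modifier is smaller than the base, so
 * it acts as a multiplier), clamped to KVM_VMX_DEFAULT_PLE_WINDOW_MAX.
 * Shrinking with the default shrink modifier of 0 resets the window
 * back to the base value.
 */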

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}
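
/*
 * #BP and #OF are the two "software exceptions": they are generated by
 * executing INT3 and INTO rather than by a fault condition, so injecting
 * them must account for the length of the triggering instruction, much
 * like a software interrupt.
 */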

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
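
/*
 * Illustrative example: with 4-level paging (48 virtual address bits), a
 * canonical address sign-extends bit 47 into bits 63:48.  Thus
 * 0x00007fffffffffff and 0xffff800000000000 are canonical, while
 * 0x0000800000000000 is not.
 */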

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}
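
/*
 * Illustrative example: outside of 64-bit mode the upper half of a GPR is
 * not architecturally visible, so a register holding 0xffffffff12345678
 * reads back as 0x12345678, and writes are likewise truncated to 32 bits.
 */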

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	return kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
		: "rm" (base), "0" (0), "1" ((u32) n));		\
	    n = __quot;						\
	    __rem;						\
	 })
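
/*
 * Illustrative example: do_shl32_div32(n, base) with n = 1 and base = 4
 * divides (1 << 32) by 4, leaving n = 0x40000000 and evaluating to a
 * remainder of 0.
 */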

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
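
/*
 * Illustrative example: each of the eight PAT entries occupies one byte,
 * with bits 7:3 reserved (hence the 0xF8 per-byte mask) and memory types
 * 2 and 3 reserved.  The final check rejects any byte that has bit 1 set
 * but bit 2 clear, so e.g. 0x0000000000000002 (entry 0 = type 2) is
 * invalid, while the power-on default 0x0007040600070406 is valid.
 */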

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to always be treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation
 * encountered an error that should result in #GP in the guest, unless
 * userspace handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})
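
/*
 * Illustrative usage: the macro is invoked with a feature-query predicate
 * and that predicate's first argument, e.g.
 * __cr4_reserved_bits(guest_cpuid_has, vcpu) yields the CR4 bits that are
 * reserved for a vCPU given its guest CPUID.
 */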

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif