#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

/*
 * Queue an interrupt for injection; "soft" means the interrupt was raised
 * by an INTn instruction rather than by an external source.
 */
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

/* An exception, interrupt or NMI is still awaiting (re)injection. */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

/* #BP and #OF are raised by INT3/INTO and are therefore "soft" exceptions. */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

/* Long mode with CS.L set: the 64-bit submode, not compatibility mode. */
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

/* Mask for a single bit within a 32-bit feature word; bitno wraps modulo 32. */
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
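/*
 * Illustrative sketch only, not part of this header: the cpuid code builds
 * its supported-feature masks from bit() via a wrapper of the form
 *
 *	#define F(name) bit(X86_FEATURE_##name)
 *
 *	const u32 supported = F(FPU) | F(MSR) | F(XMM);	(hypothetical mask)
 *
 * so each X86_FEATURE_* index is reduced to a bit position within its
 * 32-bit feature word.
 */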
/*
 * Cache the gva->gfn translation of the most recent MMIO access so that a
 * repeated access to the same page can skip the page-table walk.  The entry
 * is tagged with the current memslot generation and is therefore implicitly
 * invalidated whenever the memslots change.
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
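/*
 * Illustrative sketch only, not a definitive caller: a consumer on the
 * emulation fast path (vcpu_mmio_gva_to_gpa() in x86.c is the real one)
 * is expected to use the cache above roughly like this:
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva)) {
 *		gpa = (gpa_t)vcpu->arch.mmio_gfn << PAGE_SHIFT |
 *			(gva & (PAGE_SIZE - 1));
 *		return 1;	(reuse cached translation, skip the walk)
 *	}
 *
 * The real caller additionally checks access permissions against the
 * cached vcpu->arch.access before trusting the entry.
 */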
/* Register accessors that truncate to the low 32 bits outside 64-bit mode. */
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

#define KVM_SUPPORTED_XCR0	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
				| XSTATE_BNDREGS | XSTATE_BNDCSR)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern struct static_key kvm_no_apic_vcpu;

#endif