#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <asm/processor.h>
#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
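
/*
 * Illustrative use of the MMIO cache above (a hypothetical caller, not code
 * from this file): once a page-table walk discovers that @gva maps to MMIO,
 * the translation can be cached and a later access to the same page can
 * short-circuit the walk:
 *
 *	vcpu_cache_mmio_info(vcpu, gva, gpa >> PAGE_SHIFT, access);
 *	...
 *	if (vcpu_match_mmio_gva(vcpu, gva))
 *		// fast path: reuse the cached gfn/access bits instead of
 *		// walking the guest page tables again
 *
 * vcpu_match_mmio_gen() makes sure the cached entry is ignored once the
 * memslot generation has changed.
 */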

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
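
/*
 * Illustrative use of do_shl32_div32() (a hypothetical caller, not code from
 * this file): computing a 0.32 fixed-point fraction n/base.  This requires
 * n < base so the 32-bit quotient cannot overflow (divl would fault
 * otherwise).  Note that @n must be an lvalue: it is read as a u32 and
 * overwritten with the quotient.
 *
 *	u32 frac = 250, rem;
 *
 *	rem = do_shl32_div32(frac, 1000);
 *	// frac == (250ULL << 32) / 1000 == 0x40000000, i.e. 0.25 in 0.32
 *	// fixed point; rem holds the remainder of that division (here 0).
 */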

static inline bool kvm_mwait_in_guest(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
		return false;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* All AMD CPUs have a working MWAIT implementation */
		return true;
	case X86_VENDOR_INTEL:
		/* Handle Intel below */
		break;
	default:
		return false;
	}

	/*
	 * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
	 * they would allow the guest to stop the CPU completely by disabling
	 * interrupts then invoking MWAIT.
	 */
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return false;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return false;

	return true;
}

#endif