/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"

#ifdef CONFIG_KVM_HYPERV

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}
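/*
 * Illustrative sketch (not part of the original header, names are
 * hypothetical): code that needs to map a guest-visible VP index back to a
 * vCPU would typically scan the vCPU list and compare against
 * kvm_hv_get_vpindex(), roughly:
 *
 *	kvm_for_each_vcpu(i, v, kvm)
 *		if (kvm_hv_get_vpindex(v) == vp_index)
 *			return v;
 *
 * The vcpu_idx fallback above keeps such lookups working even before
 * HV_X64_MSR_VP_INDEX has been configured for the vCPU.
 */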
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
}

static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) &&
	       test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						       int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

/*
 * With the HV_ACCESS_TSC_INVARIANT feature, invariant TSC
 * (CPUID.80000007H:EDX[8]) is only observed after
 * HV_X64_MSR_TSC_INVARIANT_CONTROL has been written to.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * If Hyper-V's invariant TSC control is not exposed to the guest,
	 * the invariant TSC CPUID flag is not suppressed; Windows guests were
	 * observed to handle it correctly. Going forward, VMMs are encouraged
	 * to enable Hyper-V's invariant TSC control when the invariant TSC
	 * CPUID flag is set, to make KVM's behavior match genuine Hyper-V.
	 */
	if (!hv_vcpu ||
	    !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
		return false;

	/*
	 * If Hyper-V's invariant TSC control is exposed to the guest, KVM is
	 * responsible for suppressing the invariant TSC CPUID flag if the
	 * Hyper-V control is not enabled.
	 */
	return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}
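/*
 * Illustrative caller-side sketch (hypothetical variable names, not the
 * actual CPUID code): when filtering the guest's CPUID.80000007H leaf, the
 * invariant TSC bit (EDX[8]) would be cleared for as long as suppression is
 * in effect:
 *
 *	if (kvm_hv_invtsc_suppressed(vcpu))
 *		entry->edx &= ~BIT(8);
 */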
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									    bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
						     bool tdp_enabled)
{
	/*
	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
	 * L2's VP_ID upon request from the guest. Make sure we check for
	 * pending entries in the right FIFO upon L1/L2 transition as these
	 * requests are put by other vCPUs asynchronously.
	 */
	if (to_hv_vcpu(vcpu) && tdp_enabled)
		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
}
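/*
 * Rough intended flow for the TLB flush FIFOs (a sketch based on the helpers
 * above, not a normative description): a sender queues fine-grained flush
 * entries into the target vCPU's FIFO and raises KVM_REQ_HV_TLB_FLUSH; the
 * target vCPU then either services the request via kvm_hv_vcpu_flush_tlb()
 * or, if it is about to flush the whole TLB anyway, discards the now
 * redundant entries with kvm_hv_vcpu_purge_flush_tlb().
 */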
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_HYPERV */
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
					 struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	return HV_STATUS_ACCESS_DENIED;
}
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
#endif /* CONFIG_KVM_HYPERV */

#endif /* __ARCH_X86_KVM_HYPERV_H__ */