/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"

#ifdef CONFIG_KVM_HYPERV

29 /* "Hv#1" signature */
30 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities.
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V synthetic debugger MSRs */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

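/*
 * Helpers converting between generic KVM objects and their Hyper-V
 * emulation state.
 */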
static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

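/*
 * The VP index defaults to the vCPU index until the guest or the VMM
 * overrides it via HV_X64_MSR_VP_INDEX.
 */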
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

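/*
 * Hypercalls are only serviceable once the guest has identified itself by
 * writing a non-zero guest OS ID to HV_X64_MSR_GUEST_OS_ID.
 */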
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

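/*
 * vec_bitmap has a bit set for every vector currently used by some SINT;
 * auto_eoi_bitmap covers the subset of those configured for auto-EOI.
 */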
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
}

static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) &&
	       test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

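/*
 * stimer->index is the timer's position in the containing vCPU's stimer[]
 * array, so subtracting it from the element pointer yields &stimer[0],
 * which container_of() maps back to the enclosing kvm_vcpu_hv.
 */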
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

/*
 * With the HV_ACCESS_TSC_INVARIANT feature, the invariant TSC bit
 * (CPUID.80000007H:EDX[8]) is only observed after
 * HV_X64_MSR_TSC_INVARIANT_CONTROL has been written to.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * If Hyper-V's invariant TSC control is not exposed to the guest,
	 * the invariant TSC CPUID flag is not suppressed; Windows guests
	 * were observed to handle it correctly. Going forward, VMMs are
	 * encouraged to enable Hyper-V's invariant TSC control when the
	 * invariant TSC CPUID flag is set, to make KVM's behavior match
	 * genuine Hyper-V.
	 */
	if (!hv_vcpu ||
	    !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
		return false;

	/*
	 * If Hyper-V's invariant TSC control is exposed to the guest, KVM is
	 * responsible for suppressing the invariant TSC CPUID flag if the
	 * Hyper-V control is not enabled.
	 */
	return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

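/*
 * Fine-grained TLB flush requests are queued into per-vCPU FIFOs. Two
 * FIFOs exist because L1 and L2 run under distinct VP_IDs, so their
 * pending entries must not be mixed.
 */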
static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									   bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

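/*
 * Consume a pending KVM_REQ_HV_TLB_FLUSH without processing the queued
 * entries, e.g. when a full guest TLB flush makes the fine-grained
 * flushes redundant.
 */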
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

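/*
 * HV_X64_NESTED_DIRECT_FLUSH advertises "direct virtual flush", i.e. that
 * L0 handles L2's TLB flush hypercalls without a round trip through L1.
 */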
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

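/*
 * Per the TLFS hypercall ABI, the 64-bit calling convention passes the
 * hypercall input value in RCX while the 32-bit convention uses EDX:EAX;
 * the call code is the low 16 bits in both cases, so reading RAX is
 * sufficient for 32-bit callers.
 *
 * Illustrative caller pattern (not taken from an actual call site):
 *
 *	if (kvm_hv_is_tlb_flush_hcall(vcpu))
 *		ret = kvm_hv_vcpu_flush_tlb(vcpu);
 */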
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

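/*
 * Refresh the cached VP assist page. Returns 0 when there is nothing to
 * do (no Hyper-V context or the assist page is disabled) or on success,
 * non-zero otherwise.
 */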
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
						     bool tdp_enabled)
{
	/*
	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
	 * L2's VP_ID upon request from the guest. Make sure to check for
	 * pending entries in the right FIFO upon L1/L2 transition, as these
	 * requests may be queued by other vCPUs asynchronously.
	 */
	if (to_hv_vcpu(vcpu) && tdp_enabled)
		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_HYPERV */
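/*
 * Stub implementations so that callers compile, and the compiler can
 * elide the Hyper-V paths, when CONFIG_KVM_HYPERV is disabled.
 */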
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
					 struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	return HV_STATUS_ACCESS_DENIED;
}
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
#endif /* CONFIG_KVM_HYPERV */

#endif /* __ARCH_X86_KVM_HYPERV_H__ */