/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"

#ifdef CONFIG_KVM_HYPERV

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX	0x31237648

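/*
 * The constant decodes to the vendor-neutral "Hv#1" interface signature
 * when the EAX bytes are read as little-endian ASCII:
 *
 *	0x48 = 'H', 0x76 = 'v', 0x23 = '#', 0x31 = '1'
 *
 * Guests typically compare the interface-identification CPUID leaf's EAX
 * against this constant to detect a Hyper-V-compatible hypervisor.
 */
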
/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities.
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSRs */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

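/*
 * Note (behavioral sketch, not spelled out in this header): the VP index
 * starts out equal to the vCPU's index and only diverges if the VMM sets
 * HV_X64_MSR_VP_INDEX via a host-initiated MSR write, so the vcpu_idx
 * fallback above is exact for vCPUs without Hyper-V state.
 */
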
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

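/*
 * Per the TLFS, a guest must report its identity by writing a non-zero
 * value to HV_X64_MSR_GUEST_OS_ID before it can enable the hypercall
 * page, which is why a zero hv_guest_os_id disables hypercalls above.
 * A minimal guest-side enable sequence might look like the following
 * (illustrative sketch; guest_os_id and hcall_gpa are hypothetical
 * values chosen by the guest):
 *
 *	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_os_id);
 *	wrmsrl(HV_X64_MSR_HYPERCALL,
 *	       hcall_gpa | HV_X64_MSR_HYPERCALL_ENABLE);
 */
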
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
}

static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return to_hv_vcpu(vcpu) &&
	       test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	/*
	 * 'stimer' lives in the stimer[] array embedded in kvm_vcpu_hv: step
	 * back by its own index to reach stimer[0], then use container_of()
	 * to recover the enclosing kvm_vcpu_hv.
	 */
	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

/*
 * With the HV_ACCESS_TSC_INVARIANT feature, the invariant TSC bit
 * (CPUID.80000007H:EDX[8]) is only observed after
 * HV_X64_MSR_TSC_INVARIANT_CONTROL has been written to.
 */
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * If Hyper-V's invariant TSC control is not exposed to the guest,
	 * the invariant TSC CPUID flag is not suppressed; Windows guests were
	 * observed to handle it correctly. Going forward, VMMs are
	 * encouraged to enable Hyper-V's invariant TSC control when the
	 * invariant TSC CPUID flag is set, to make KVM's behavior match
	 * genuine Hyper-V.
	 */
	if (!hv_vcpu ||
	    !(hv_vcpu->cpuid_cache.features_eax & HV_ACCESS_TSC_INVARIANT))
		return false;

	/*
	 * If Hyper-V's invariant TSC control is exposed to the guest, KVM is
	 * responsible for suppressing the invariant TSC CPUID flag if the
	 * Hyper-V control is not enabled.
	 */
	return !(to_kvm_hv(vcpu->kvm)->hv_invtsc_control & HV_EXPOSE_INVARIANT_TSC);
}

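/*
 * Illustrative caller-side sketch (assumed usage, not a definitive
 * contract): the CPUID emulation path is expected to mask the invariant
 * TSC bit out of leaf 0x80000007 when suppression is in effect, along
 * the lines of:
 *
 *	if (function == 0x80000007 && kvm_hv_invtsc_suppressed(vcpu))
 *		*edx &= ~BIT(8);	// clear CPUID.80000007H:EDX[8]
 */
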
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									    bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

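/*
 * The register split above follows the Hyper-V hypercall ABI: the call
 * code occupies the low 16 bits of the hypercall input value, which is
 * passed in RCX for the 64-bit calling convention and in EAX for the
 * 32-bit one; hence a u16 'code' suffices to identify the flush calls.
 */
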
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
						     bool tdp_enabled)
{
	/*
	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
	 * L2's VP_ID upon request from the guest. Make sure to check for
	 * pending entries in the right FIFO upon L1/L2 transition, as these
	 * requests are posted by other vCPUs asynchronously.
	 */
	if (to_hv_vcpu(vcpu) && tdp_enabled)
		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_HYPERV */
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
					 struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	return HV_STATUS_ACCESS_DENIED;
}
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
{
	return false;
}
static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}
static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
#endif /* CONFIG_KVM_HYPERV */

#endif /* __ARCH_X86_KVM_HYPERV_H__ */