// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/smp.h>

#include "../cpuid.h"
#include "hyperv.h"
#include "nested.h"
#include "vmcs.h"
#include "vmx.h"
#include "trace.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

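/*
 * Fetch the guest physical address of the currently active Enlightened VMCS
 * from the vCPU's VP assist page.  Returns EVMPTR_INVALID if the assist page
 * cannot be read or if enlightened VM entry is not enabled for the vCPU.
 */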
u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(kvm_hv_get_assist_page(vcpu)))
		return EVMPTR_INVALID;

	if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry))
		return EVMPTR_INVALID;

	return hv_vcpu->vp_assist_page.current_nested_vmcs;
}

uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
{
	/*
	 * vmcs_version represents the range of supported Enlightened VMCS
	 * versions: the lower 8 bits are the minimal version, the higher 8
	 * bits are the maximum supported version.  KVM supports versions
	 * from 1 to KVM_EVMCS_VERSION.
	 *
	 * Note, do not check that Hyper-V is fully enabled in guest CPUID;
	 * this helper is used to _get_ the vCPU's supported CPUID.
	 */
	if (kvm_cpu_cap_get(X86_FEATURE_VMX) &&
	    (!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))
		return (KVM_EVMCS_VERSION << 8) | 1;

	return 0;
}

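/*
 * Masks of the VMX controls that can be exposed to L1 when Enlightened VMCS
 * is in use, indexed by control type and eVMCS revision.  Note that TSC
 * scaling is explicitly filtered out of the secondary exec controls.
 */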
enum evmcs_revision {
	EVMCSv1_LEGACY,
	NR_EVMCS_REVISIONS,
};

enum evmcs_ctrl_type {
	EVMCS_EXIT_CTRLS,
	EVMCS_ENTRY_CTRLS,
	EVMCS_EXEC_CTRL,
	EVMCS_2NDEXEC,
	EVMCS_3RDEXEC,
	EVMCS_PINCTRL,
	EVMCS_VMFUNC,
	NR_EVMCS_CTRLS,
};

static const u32 evmcs_supported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = {
	[EVMCS_EXIT_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMEXIT_CTRL,
	},
	[EVMCS_ENTRY_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMENTRY_CTRL,
	},
	[EVMCS_EXEC_CTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_EXEC_CTRL,
	},
	[EVMCS_2NDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_2NDEXEC & ~SECONDARY_EXEC_TSC_SCALING,
	},
	[EVMCS_3RDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_3RDEXEC,
	},
	[EVMCS_PINCTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_PINCTRL,
	},
	[EVMCS_VMFUNC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMFUNC,
	},
};

static u32 evmcs_get_supported_ctls(enum evmcs_ctrl_type ctrl_type)
{
	enum evmcs_revision evmcs_rev = EVMCSv1_LEGACY;

	return evmcs_supported_ctrls[ctrl_type][evmcs_rev];
}

static bool evmcs_has_perf_global_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * PERF_GLOBAL_CTRL has a quirk where some Windows guests may fail to
	 * boot if a PV CPUID feature flag is not also set.  Treat the fields
	 * as unsupported if the flag is not set in guest CPUID.  This should
	 * be called only for guest accesses, and all guest accesses should be
	 * gated on Hyper-V being enabled and initialized.
	 */
	if (WARN_ON_ONCE(!hv_vcpu))
		return false;

	return hv_vcpu->cpuid_cache.nested_ebx & HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
}

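/*
 * Filter the VMX capability MSRs reported to an eVMCS-enabled guest: clear
 * control bits that have no corresponding Enlightened VMCS field, and hide
 * the PERF_GLOBAL_CTRL load controls unless HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL
 * is set in the guest's Hyper-V CPUID (see evmcs_has_perf_global_ctrl()).
 */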
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u32 ctl_low = (u32)*pdata;
	u32 ctl_high = (u32)(*pdata >> 32);
	u32 supported_ctrls;

	/*
	 * Hyper-V 2016 and 2019 try using these features even when eVMCS
	 * is enabled but there are no corresponding fields.
	 */
	switch (msr_index) {
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_EXIT_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_ENTRY_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_ENTRY_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_EXEC_CTRL);
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_2NDEXEC);
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_PINCTRL);
		break;
	case MSR_IA32_VMX_VMFUNC:
		ctl_low &= evmcs_get_supported_ctls(EVMCS_VMFUNC);
		break;
	}

	*pdata = ctl_low | ((u64)ctl_high << 32);
}

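/*
 * Returns true if @val contains only control bits that have corresponding
 * Enlightened VMCS fields for the given control type.
 */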
static bool nested_evmcs_is_valid_controls(enum evmcs_ctrl_type ctrl_type,
					   u32 val)
{
	return !(val & ~evmcs_get_supported_ctls(ctrl_type));
}

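/*
 * Consistency-check the controls in vmcs12 against the eVMCS-supported
 * masks; fails the nested VM entry (-EINVAL) if L1 enabled a control for
 * which no Enlightened VMCS field exists.
 */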
int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_evmcs_is_valid_controls(EVMCS_PINCTRL,
					       vmcs12->pin_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXEC_CTRL,
					       vmcs12->cpu_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_2NDEXEC,
					       vmcs12->secondary_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXIT_CTRLS,
					       vmcs12->vm_exit_controls)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_ENTRY_CTRLS,
					       vmcs12->vm_entry_controls)))
		return -EINVAL;

	/*
	 * VM-Func controls are 64-bit, but KVM currently doesn't support any
	 * controls in bits 63:32, i.e. dropping those bits on the consistency
	 * check is intentional.
	 */
	if (WARN_ON_ONCE(vmcs12->vm_function_control >> 32))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_VMFUNC,
					       vmcs12->vm_function_control)))
		return -EINVAL;

	return 0;
}

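/*
 * Enable Enlightened VMCS usage for the vCPU and, if requested, report the
 * supported eVMCS version range (see nested_get_evmcs_version()).
 */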
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->nested.enlightened_vmcs_enabled = true;

	if (vmcs_version)
		*vmcs_version = nested_get_evmcs_version(vcpu);

	return 0;
}

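/*
 * Returns true if the L2 TLB flush enlightenment is enabled for the vCPU:
 * both the nested_flush_hypercall control in the eVMCS and the "direct
 * hypercall" feature in the VP assist page's nested controls must be set.
 */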
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	if (!hv_vcpu || !evmcs)
		return false;

	if (!evmcs->hv_enlightenments_control.nested_flush_hypercall)
		return false;

	return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
}

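/*
 * Synthesize a nested VM-exit to L1 with Hyper-V's "trap after flush" exit
 * reason, notifying L1 that a TLB flush has been performed on behalf of L2.
 */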
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
{
	nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0);
}