xref: /linux/arch/x86/kvm/vmx/hyperv.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/smp.h>

#include "x86.h"
#include "../cpuid.h"
#include "hyperv.h"
#include "nested.h"
#include "vmcs.h"
#include "vmx.h"
#include "trace.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

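/*
 * Retrieve the GPA of the currently active enlightened VMCS from the vCPU's
 * VP assist page.  Returns EVMPTR_INVALID if the assist page can't be read
 * or if the guest hasn't enabled enlightened VM-entry.
 */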
u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(kvm_hv_get_assist_page(vcpu)))
		return EVMPTR_INVALID;

	if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry))
		return EVMPTR_INVALID;

	return hv_vcpu->vp_assist_page.current_nested_vmcs;
}

uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
{
	/*
	 * vmcs_version represents the range of supported Enlightened VMCS
	 * versions: the lower 8 bits are the minimal version, the higher 8
	 * bits are the maximum supported version.  KVM supports versions from
	 * 1 to KVM_EVMCS_VERSION.
	 *
	 * Note, do not check whether Hyper-V is fully enabled in guest CPUID;
	 * this helper is used to _get_ the vCPU's supported CPUID.
	 */
	if (kvm_cpu_cap_get(X86_FEATURE_VMX) &&
	    (!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))
		return (KVM_EVMCS_VERSION << 8) | 1;

	return 0;
}

enum evmcs_revision {
	EVMCSv1_LEGACY,
	NR_EVMCS_REVISIONS,
};

enum evmcs_ctrl_type {
	EVMCS_EXIT_CTRLS,
	EVMCS_ENTRY_CTRLS,
	EVMCS_EXEC_CTRL,
	EVMCS_2NDEXEC,
	EVMCS_3RDEXEC,
	EVMCS_PINCTRL,
	EVMCS_VMFUNC,
	NR_EVMCS_CTRLS,
};

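/*
 * VMX control bits that KVM advertises to L1 for each supported eVMCS
 * revision.  Note, TSC scaling is masked out of the secondary exec controls
 * for eVMCSv1.
 */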
static const u32 evmcs_supported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = {
	[EVMCS_EXIT_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMEXIT_CTRL,
	},
	[EVMCS_ENTRY_CTRLS] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMENTRY_CTRL,
	},
	[EVMCS_EXEC_CTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_EXEC_CTRL,
	},
	[EVMCS_2NDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_2NDEXEC & ~SECONDARY_EXEC_TSC_SCALING,
	},
	[EVMCS_3RDEXEC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_3RDEXEC,
	},
	[EVMCS_PINCTRL] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_PINCTRL,
	},
	[EVMCS_VMFUNC] = {
		[EVMCSv1_LEGACY] = EVMCS1_SUPPORTED_VMFUNC,
	},
};

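/*
 * Look up the control bits supported for the given control type.  Only the
 * EVMCSv1_LEGACY revision is implemented, so the revision is hardcoded for
 * now.
 */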
static u32 evmcs_get_supported_ctls(enum evmcs_ctrl_type ctrl_type)
{
	enum evmcs_revision evmcs_rev = EVMCSv1_LEGACY;

	return evmcs_supported_ctrls[ctrl_type][evmcs_rev];
}

static bool evmcs_has_perf_global_ctrl(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	/*
	 * PERF_GLOBAL_CTRL has a quirk where some Windows guests may fail to
	 * boot if a PV CPUID feature flag is not also set.  Treat the fields
	 * as unsupported if the flag is not set in guest CPUID.  This should
	 * be called only for guest accesses, and all guest accesses should be
	 * gated on Hyper-V being enabled and initialized.
	 */
	if (WARN_ON_ONCE(!hv_vcpu))
		return false;

	return hv_vcpu->cpuid_cache.nested_ebx & HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
}

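/*
 * Filter the value reported for a VMX capability MSR: mask the advertised
 * controls down to what the supported eVMCS revision actually provides
 * (and, for PERF_GLOBAL_CTRL, what guest CPUID allows).
 */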
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u32 ctl_low = (u32)*pdata;
	u32 ctl_high = (u32)(*pdata >> 32);
	u32 supported_ctrls;

	/*
	 * Hyper-V 2016 and 2019 try to use these features even when eVMCS
	 * is enabled but has no corresponding fields, so hide them from the
	 * guest.
	 */
	switch (msr_index) {
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_EXIT_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_ENTRY_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		supported_ctrls = evmcs_get_supported_ctls(EVMCS_ENTRY_CTRLS);
		if (!evmcs_has_perf_global_ctrl(vcpu))
			supported_ctrls &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		ctl_high &= supported_ctrls;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_EXEC_CTRL);
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_2NDEXEC);
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		ctl_high &= evmcs_get_supported_ctls(EVMCS_PINCTRL);
		break;
	case MSR_IA32_VMX_VMFUNC:
		ctl_low &= evmcs_get_supported_ctls(EVMCS_VMFUNC);
		break;
	}

	*pdata = ctl_low | ((u64)ctl_high << 32);
}

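/*
 * Consistency checks applied on nested VM-entry when an enlightened VMCS is
 * in use: reject vmcs12 if it sets any control bit that isn't supported by
 * the eVMCS revision in use.
 */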
static bool nested_evmcs_is_valid_controls(enum evmcs_ctrl_type ctrl_type,
					   u32 val)
{
	return !(val & ~evmcs_get_supported_ctls(ctrl_type));
}

int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_evmcs_is_valid_controls(EVMCS_PINCTRL,
					       vmcs12->pin_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXEC_CTRL,
					       vmcs12->cpu_based_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_2NDEXEC,
					       vmcs12->secondary_vm_exec_control)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXIT_CTRLS,
					       vmcs12->vm_exit_controls)))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_ENTRY_CTRLS,
					       vmcs12->vm_entry_controls)))
		return -EINVAL;

	/*
	 * VM-Func controls are 64-bit, but KVM currently doesn't support any
	 * controls in bits 63:32, i.e. dropping those bits on the consistency
	 * check is intentional.
	 */
	if (WARN_ON_ONCE(vmcs12->vm_function_control >> 32))
		return -EINVAL;

	if (CC(!nested_evmcs_is_valid_controls(EVMCS_VMFUNC,
					       vmcs12->vm_function_control)))
		return -EINVAL;

	return 0;
}

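/*
 * Enable enlightened VMCS support for the vCPU, e.g. when userspace enables
 * KVM_CAP_HYPERV_ENLIGHTENED_VMCS, and optionally report the supported eVMCS
 * version range back to the caller.
 */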
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->nested.enlightened_vmcs_enabled = true;

	if (vmcs_version)
		*vmcs_version = nested_get_evmcs_version(vcpu);

	return 0;
}

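/*
 * Check whether L1 has enabled "direct" L2 TLB flush handling for the current
 * eVMCS: both the nested_flush_hypercall eVMCS control and the
 * directhypercall feature in the VP assist page must be set.
 */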
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	if (!hv_vcpu || !evmcs)
		return false;

	if (!evmcs->hv_enlightenments_control.nested_flush_hypercall)
		return false;

	return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
}

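/*
 * Inject Hyper-V's synthetic VM-Exit (TRAP_AFTER_FLUSH) into L1, used to
 * notify L1 after an L2 TLB flush hypercall has been handled on its behalf.
 */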
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
{
	nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0);
}
231