xref: /linux/arch/x86/include/asm/vmware.h (revision 55d0969c451159cff86949b38c39171cab962069)
/* SPDX-License-Identifier: GPL-2.0 or MIT */
#ifndef _ASM_X86_VMWARE_H
#define _ASM_X86_VMWARE_H

#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <linux/stringify.h>

/*
 * VMware hypercall ABI.
 *
 * - Low bandwidth (LB) hypercalls (I/O port based, vmcall and vmmcall)
 * have up to 6 input and 6 output arguments passed and returned using
 * registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
 * %esi (arg4), %edi (arg5).
 * The following input arguments must be initialized by the caller:
 * arg0 - VMWARE_HYPERVISOR_MAGIC
 * arg2 - Hypercall command
 * arg3 bits [15:0] - Port number, LB and direction flags
 *
 * - Low bandwidth TDX hypercalls (x86_64 only) are similar to LB
 * hypercalls. They also have up to 6 input and 6 output arguments
 * passed and returned in registers, but with a different
 * argument-to-register mapping:
 * %r12 (arg0), %rbx (arg1), %r13 (arg2), %rdx (arg3),
 * %rsi (arg4), %rdi (arg5).
 *
 * - High bandwidth (HB) hypercalls are I/O port based only. They have
 * up to 7 input and 7 output arguments passed and returned using
 * registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
 * %esi (arg4), %edi (arg5), %ebp (arg6).
 * The following input arguments must be initialized by the caller:
 * arg0 - VMWARE_HYPERVISOR_MAGIC
 * arg1 - Hypercall command
 * arg3 bits [15:0] - Port number, HB and direction flags
 *
 * For compatibility purposes, x86_64 systems use only the lower 32 bits
 * of the input and output arguments.
 *
 * The hypercall definitions differ in the low word of %edx (arg3) in
 * the following way: the old I/O port based interface uses the port
 * number to distinguish between high- and low-bandwidth versions, and
 * uses IN/OUT instructions to define the transfer direction.
 *
 * The new vmcall interface instead uses a set of flags to select
 * bandwidth mode and transfer direction. The flags should be loaded
 * into arg3 by the caller and are automatically replaced by the port
 * number if the I/O port method is used.
 */
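/*
 * Purely illustrative example (not part of the ABI definition above): an
 * LB GETVERSION call issued through the I/O port interface ends up
 * executing "inl" with %eax = VMWARE_HYPERVISOR_MAGIC (0x564d5868),
 * %ecx = VMWARE_CMD_GETVERSION (10) and the port number
 * VMWARE_HYPERVISOR_PORT (0x5658) in %dx; the constants are defined
 * just below.
 */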

#define VMWARE_HYPERVISOR_HB		BIT(0)
#define VMWARE_HYPERVISOR_OUT		BIT(1)

#define VMWARE_HYPERVISOR_PORT		0x5658
#define VMWARE_HYPERVISOR_PORT_HB	(VMWARE_HYPERVISOR_PORT | \
					 VMWARE_HYPERVISOR_HB)

#define VMWARE_HYPERVISOR_MAGIC		0x564d5868U

#define VMWARE_CMD_GETVERSION		10
#define VMWARE_CMD_GETHZ		45
#define VMWARE_CMD_GETVCPU_INFO		68
#define VMWARE_CMD_STEALCLOCK		91
/*
 * Hypercall command mask:
 *   bits [6:0] command, range [0, 127]
 *   bits [19:16] sub-command, range [0, 15]
 */
#define VMWARE_CMD_MASK			0xf007fU
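/*
 * Illustrative only: for a command that carries a sub-command, the command
 * word would be composed as
 *
 *	cmd = SOME_CMD | (subcmd << 16);
 *
 * where SOME_CMD and subcmd are hypothetical values in the ranges [0, 127]
 * and [0, 15] described above, so that the result fits VMWARE_CMD_MASK.
 */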

#define CPUID_VMWARE_FEATURES_ECX_VMMCALL	BIT(0)
#define CPUID_VMWARE_FEATURES_ECX_VMCALL	BIT(1)

extern unsigned long vmware_hypercall_slow(unsigned long cmd,
					   unsigned long in1, unsigned long in3,
					   unsigned long in4, unsigned long in5,
					   u32 *out1, u32 *out2, u32 *out3,
					   u32 *out4, u32 *out5);

#define VMWARE_TDX_VENDOR_LEAF 0x1af7e4909ULL
#define VMWARE_TDX_HCALL_FUNC  1

extern unsigned long vmware_tdx_hypercall(unsigned long cmd,
					  unsigned long in1, unsigned long in3,
					  unsigned long in4, unsigned long in5,
					  u32 *out1, u32 *out2, u32 *out3,
					  u32 *out4, u32 *out5);

/*
 * The low bandwidth call. The low word of %edx is presumed to have OUT bit
 * set. The high word of %edx may contain input data from the caller.
 */
#define VMWARE_HYPERCALL					\
	ALTERNATIVE_2("movw %[port], %%dx\n\t"			\
		      "inl (%%dx), %%eax",			\
		      "vmcall", X86_FEATURE_VMCALL,		\
		      "vmmcall", X86_FEATURE_VMW_VMMCALL)

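/*
 * Note on the helpers below: the numeric suffix of vmware_hypercall<N>()
 * counts the in/out register arguments used in addition to the command,
 * e.g. vmware_hypercall3() takes one input (in1) and fills two outputs
 * (out1, out2). Each helper dispatches to vmware_tdx_hypercall() on TDX
 * guests, to vmware_hypercall_slow() while alternatives are not yet
 * patched (built-in code only), and to the patched VMWARE_HYPERCALL
 * sequence otherwise.
 */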
static inline
unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
{
	unsigned long out0;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
					    NULL, NULL, NULL, NULL, NULL);

	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
					     NULL, NULL, NULL, NULL, NULL);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0)
		: [port] "i" (VMWARE_HYPERVISOR_PORT),
		  "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (0)
		: "cc", "memory");
	return out0;
}
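
/*
 * A minimal usage sketch (illustrative only, not part of this header),
 * loosely modeled on the GETVCPU_INFO query in arch/x86/kernel/cpu/vmware.c.
 * The meaning of the returned bits (bit 31 = result invalid, bit 3 = legacy
 * x2APIC available) and the zero input argument are assumptions made for
 * the example, not facts stated in this file.
 */
static inline bool vmware_example_legacy_x2apic(void)
{
	u32 eax;

	/* Single register input (in1 = 0), single register output (%eax). */
	eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
	return !(eax & (1u << 31)) && (eax & (1u << 3));
}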

static inline
unsigned long vmware_hypercall3(unsigned long cmd, unsigned long in1,
				u32 *out1, u32 *out2)
{
	unsigned long out0;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
					    out1, out2, NULL, NULL, NULL);

	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
					     out1, out2, NULL, NULL, NULL);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0), "=b" (*out1), "=c" (*out2)
		: [port] "i" (VMWARE_HYPERVISOR_PORT),
		  "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (0)
		: "cc", "memory");
	return out0;
}
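
/*
 * Another illustrative sketch (not part of this header), loosely based on
 * the TSC frequency query in arch/x86/kernel/cpu/vmware.c. It assumes GETHZ
 * returns the frequency in Hz split across %eax (low 32 bits) and %ebx
 * (high 32 bits), with an all-ones %ebx meaning "not available"; the
 * all-ones input argument is likewise an assumption taken from that driver.
 */
static inline u64 vmware_example_tsc_hz(void)
{
	u32 hz_lo, hz_hi, dummy;

	hz_lo = vmware_hypercall3(VMWARE_CMD_GETHZ, ~0UL, &hz_hi, &dummy);
	if (hz_hi == ~0U)
		return 0;	/* frequency not reported by the hypervisor */
	return ((u64)hz_hi << 32) | hz_lo;
}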

static inline
unsigned long vmware_hypercall4(unsigned long cmd, unsigned long in1,
				u32 *out1, u32 *out2, u32 *out3)
{
	unsigned long out0;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return vmware_tdx_hypercall(cmd, in1, 0, 0, 0,
					    out1, out2, out3, NULL, NULL);

	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1, 0, 0, 0,
					     out1, out2, out3, NULL, NULL);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0), "=b" (*out1), "=c" (*out2), "=d" (*out3)
		: [port] "i" (VMWARE_HYPERVISOR_PORT),
		  "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (0)
		: "cc", "memory");
	return out0;
}

static inline
unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
				unsigned long in3, unsigned long in4,
				unsigned long in5, u32 *out2)
{
	unsigned long out0;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return vmware_tdx_hypercall(cmd, in1, in3, in4, in5,
					    NULL, out2, NULL, NULL, NULL);

	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1, in3, in4, in5,
					     NULL, out2, NULL, NULL, NULL);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0), "=c" (*out2)
		: [port] "i" (VMWARE_HYPERVISOR_PORT),
		  "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (in3),
		  "S" (in4),
		  "D" (in5)
		: "cc", "memory");
	return out0;
}

static inline
unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
				unsigned long in3, u32 *out2,
				u32 *out3, u32 *out4, u32 *out5)
{
	unsigned long out0;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return vmware_tdx_hypercall(cmd, in1, in3, 0, 0,
					    NULL, out2, out3, out4, out5);

	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1, in3, 0, 0,
					     NULL, out2, out3, out4, out5);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0), "=c" (*out2), "=d" (*out3), "=S" (*out4),
		  "=D" (*out5)
		: [port] "i" (VMWARE_HYPERVISOR_PORT),
		  "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (in3)
		: "cc", "memory");
	return out0;
}

static inline
unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
				unsigned long in3, unsigned long in4,
				unsigned long in5, u32 *out1,
				u32 *out2, u32 *out3)
{
	unsigned long out0;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return vmware_tdx_hypercall(cmd, in1, in3, in4, in5,
					    out1, out2, out3, NULL, NULL);

	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1, in3, in4, in5,
					     out1, out2, out3, NULL, NULL);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0), "=b" (*out1), "=c" (*out2), "=d" (*out3)
		: [port] "i" (VMWARE_HYPERVISOR_PORT),
		  "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (in3),
		  "S" (in4),
		  "D" (in5)
		: "cc", "memory");
	return out0;
}
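/*
 * %ebp/%rbp cannot be named directly in the HB asm constraints below because
 * it may be in use as the frame pointer, and on 32-bit no other general
 * register is left free (eax, ebx, ecx, edx, esi and edi are all bound).
 * The 7th argument is therefore passed in a general register on x86_64
 * ("r") or via memory on 32-bit ("m"), and moved into %ebp by hand around
 * the string I/O instruction.
 */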
#ifdef CONFIG_X86_64
#define VMW_BP_CONSTRAINT "r"
#else
#define VMW_BP_CONSTRAINT "m"
#endif

/*
 * High bandwidth calls are not supported on encrypted memory guests.
 * The caller should check cc_platform_has(CC_ATTR_MEM_ENCRYPT) and use
 * a low bandwidth hypercall if memory encryption is enabled.
 * This assumption simplifies the HB hypercall implementation to a plain
 * I/O port based approach, without alternative patching.
 */
static inline
unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2,
				      unsigned long in3, unsigned long in4,
				      unsigned long in5, unsigned long in6,
				      u32 *out1)
{
	unsigned long out0;

	asm_inline volatile (
		UNWIND_HINT_SAVE
		"push %%" _ASM_BP "\n\t"
		UNWIND_HINT_UNDEFINED
		"mov %[in6], %%" _ASM_BP "\n\t"
		"rep outsb\n\t"
		"pop %%" _ASM_BP "\n\t"
		UNWIND_HINT_RESTORE
		: "=a" (out0), "=b" (*out1)
		: "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (cmd),
		  "c" (in2),
		  "d" (in3 | VMWARE_HYPERVISOR_PORT_HB),
		  "S" (in4),
		  "D" (in5),
		  [in6] VMW_BP_CONSTRAINT (in6)
		: "cc", "memory");
	return out0;
}
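
/*
 * A usage sketch for the HB path (illustrative pseudo-code only, not part
 * of this header), following the rule from the comment above: gate on
 * memory encryption before attempting a high bandwidth transfer, with
 * cc_platform_has() coming from <linux/cc_platform.h>. Because the
 * transfer instruction is "rep outsb", %ecx (in2) carries the byte count
 * and %esi (in4) the source buffer; the remaining arguments are shown as
 * zero and would be filled in by a real user according to its own protocol.
 *
 *	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 *		return <fall back to low bandwidth hypercalls>;
 *	ret = vmware_hypercall_hb_out(cmd, len, 0, (unsigned long)buf,
 *				      0, 0, &status);
 */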

static inline
unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2,
				     unsigned long in3, unsigned long in4,
				     unsigned long in5, unsigned long in6,
				     u32 *out1)
{
	unsigned long out0;

	asm_inline volatile (
		UNWIND_HINT_SAVE
		"push %%" _ASM_BP "\n\t"
		UNWIND_HINT_UNDEFINED
		"mov %[in6], %%" _ASM_BP "\n\t"
		"rep insb\n\t"
		"pop %%" _ASM_BP "\n\t"
		UNWIND_HINT_RESTORE
		: "=a" (out0), "=b" (*out1)
		: "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (cmd),
		  "c" (in2),
		  "d" (in3 | VMWARE_HYPERVISOR_PORT_HB),
		  "S" (in4),
		  "D" (in5),
		  [in6] VMW_BP_CONSTRAINT (in6)
		: "cc", "memory");
	return out0;
}
#undef VMW_BP_CONSTRAINT
#undef VMWARE_HYPERCALL

#endif /* _ASM_X86_VMWARE_H */