// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2021 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef _VMWGFX_MSG_ARM64_H
#define _VMWGFX_MSG_ARM64_H

#if defined(__aarch64__)

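/*
 * arm64 guests reach the VMware "backdoor" through a trapped instruction
 * rather than real x86 port I/O: the hypercall arguments are loaded into
 * x0-x6, mirroring the eax/ebx/ecx/edx/esi/edi/ebp layout of the x86
 * interface, x7 describes the x86 I/O instruction being emulated, and
 * reading the (otherwise harmless) mdccsr_el0 debug register is the
 * instruction the hypervisor intercepts and services as the call.
 */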
#define VMWARE_HYPERVISOR_PORT		0x5658
#define VMWARE_HYPERVISOR_PORT_HB	0x5659

#define VMWARE_HYPERVISOR_HB		BIT(0)
#define VMWARE_HYPERVISOR_OUT		BIT(1)

#define VMWARE_HYPERVISOR_MAGIC		0x564D5868

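/*
 * x7 carries X86_IO_MAGIC in its upper 32 bits plus a description of the
 * emulated x86 port access in the low bits: operand size, transfer
 * direction (DIR set = in), string/REP semantics (STR) and the direction
 * flag (DF).  W7_WITH and the IMM field presumably select how the port
 * number is supplied; the callers below always set W7_WITH.
 */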
#define X86_IO_MAGIC			0x86

#define X86_IO_W7_SIZE_SHIFT		0
#define X86_IO_W7_SIZE_MASK		(0x3 << X86_IO_W7_SIZE_SHIFT)
#define X86_IO_W7_DIR			(1 << 2)
#define X86_IO_W7_WITH			(1 << 3)
#define X86_IO_W7_STR			(1 << 4)
#define X86_IO_W7_DF			(1 << 5)
#define X86_IO_W7_IMM_SHIFT		5
#define X86_IO_W7_IMM_MASK		(0xff << X86_IO_W7_IMM_SHIFT)

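/*
 * Low-bandwidth hypercall with a single argument.  Emulates an IN from
 * VMWARE_HYPERVISOR_PORT; the result is returned from x0 (the emulated
 * eax).
 */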
static inline
unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
{
	register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
	register u64 x1 asm("x1") = in1;
	register u64 x2 asm("x2") = cmd;
	register u64 x3 asm("x3") = VMWARE_HYPERVISOR_PORT;
	register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
				    X86_IO_W7_WITH |
				    X86_IO_W7_DIR |
				    (2 << X86_IO_W7_SIZE_SHIFT);

	asm_inline volatile (
		"mrs xzr, mdccsr_el0; "
		: "+r" (x0)
		: "r" (x1), "r" (x2), "r" (x3), "r" (x7)
		: "memory");

	return x0;
}

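/*
 * Low-bandwidth call with inputs in x1, x3, x4 and x5 (in3 is OR'ed with
 * the port number in x3); x2 is returned through *out2 in addition to
 * the x0 return value.
 */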
static inline
unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
				unsigned long in3, unsigned long in4,
				unsigned long in5, u32 *out2)
{
	register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
	register u64 x1 asm("x1") = in1;
	register u64 x2 asm("x2") = cmd;
	register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
	register u64 x4 asm("x4") = in4;
	register u64 x5 asm("x5") = in5;
	register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
				    X86_IO_W7_WITH |
				    X86_IO_W7_DIR |
				    (2 << X86_IO_W7_SIZE_SHIFT);

	asm_inline volatile (
		"mrs xzr, mdccsr_el0; "
		: "+r" (x0), "+r" (x2)
		: "r" (x1), "r" (x3), "r" (x4), "r" (x5), "r" (x7)
		: "memory");

	*out2 = x2;
	return x0;
}

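/*
 * Low-bandwidth call with inputs in x1 and x3 and four results: x2-x5
 * are returned through *out2..*out5 (x4 and x5 are output-only).
 */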
static inline
unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
				unsigned long in3, u32 *out2,
				u32 *out3, u32 *out4, u32 *out5)
{
	register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
	register u64 x1 asm("x1") = in1;
	register u64 x2 asm("x2") = cmd;
	register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
	register u64 x4 asm("x4");
	register u64 x5 asm("x5");
	register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
				    X86_IO_W7_WITH |
				    X86_IO_W7_DIR |
				    (2 << X86_IO_W7_SIZE_SHIFT);

	asm_inline volatile (
		"mrs xzr, mdccsr_el0; "
		: "+r" (x0), "+r" (x2), "+r" (x3), "=r" (x4), "=r" (x5)
		: "r" (x1), "r" (x7)
		: "memory");

	*out2 = x2;
	*out3 = x3;
	*out4 = x4;
	*out5 = x5;
	return x0;
}

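/*
 * Low-bandwidth call with inputs in x1, x3, x4 and x5 and three results
 * returned from x1, x2 and x3 in addition to the x0 return value.
 */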
static inline
unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
				unsigned long in3, unsigned long in4,
				unsigned long in5, u32 *out1,
				u32 *out2, u32 *out3)
{
	register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
	register u64 x1 asm("x1") = in1;
	register u64 x2 asm("x2") = cmd;
	register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
	register u64 x4 asm("x4") = in4;
	register u64 x5 asm("x5") = in5;
	register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
				    X86_IO_W7_WITH |
				    X86_IO_W7_DIR |
				    (2 << X86_IO_W7_SIZE_SHIFT);

	asm_inline volatile (
		"mrs xzr, mdccsr_el0; "
		: "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
		: "r" (x4), "r" (x5), "r" (x7)
		: "memory");

	*out1 = x1;
	*out2 = x2;
	*out3 = x3;
	return x0;
}

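/*
 * High-bandwidth hypercall on VMWARE_HYPERVISOR_PORT_HB, emulating an
 * x86 string I/O transfer (X86_IO_W7_STR).  "dir" selects the transfer
 * direction: 0 for guest-to-host, X86_IO_W7_DIR for host-to-guest.  The
 * emulated ebx (x1) is returned through *out1.
 */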
static inline
unsigned long vmware_hypercall_hb(unsigned long cmd, unsigned long in2,
				  unsigned long in3, unsigned long in4,
				  unsigned long in5, unsigned long in6,
				  u32 *out1, int dir)
{
	register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
	register u64 x1 asm("x1") = cmd;
	register u64 x2 asm("x2") = in2;
	register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT_HB;
	register u64 x4 asm("x4") = in4;
	register u64 x5 asm("x5") = in5;
	register u64 x6 asm("x6") = in6;
	register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
				    X86_IO_W7_STR |
				    X86_IO_W7_WITH |
				    dir;

	asm_inline volatile (
		"mrs xzr, mdccsr_el0; "
		: "+r" (x0), "+r" (x1)
		: "r" (x2), "r" (x3), "r" (x4), "r" (x5),
		  "r" (x6), "r" (x7)
		: "memory");

	*out1 = x1;
	return x0;
}

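/* Guest-to-host (outbound) high-bandwidth transfer: direction bit clear. */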
static inline
unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2,
				      unsigned long in3, unsigned long in4,
				      unsigned long in5, unsigned long in6,
				      u32 *out1)
{
	return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1, 0);
}

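/* Host-to-guest (inbound) high-bandwidth transfer: X86_IO_W7_DIR set. */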
static inline
unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2,
				     unsigned long in3, unsigned long in4,
				     unsigned long in5, unsigned long in6,
				     u32 *out1)
{
	return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1,
				   X86_IO_W7_DIR);
}
#endif /* __aarch64__ */

#endif /* _VMWGFX_MSG_ARM64_H */