/*
 * Copyright 2013 Red Hat
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef VIRTGPU_DRM_H
#define VIRTGPU_DRM_H

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 *
 * Do not use pointers; use __u64 instead for 32 bit / 64 bit user/kernel
 * compatibility. Keep fields aligned to their size.
 */

#define DRM_VIRTGPU_MAP         0x01
#define DRM_VIRTGPU_EXECBUFFER  0x02
#define DRM_VIRTGPU_GETPARAM    0x03
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
#define DRM_VIRTGPU_RESOURCE_INFO     0x05
#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT     0x08
#define DRM_VIRTGPU_GET_CAPS  0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b

#define VIRTGPU_EXECBUF_FENCE_FD_IN	0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT	0x02
#define VIRTGPU_EXECBUF_RING_IDX	0x04
#define VIRTGPU_EXECBUF_FLAGS  (\
		VIRTGPU_EXECBUF_FENCE_FD_IN |\
		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
		VIRTGPU_EXECBUF_RING_IDX |\
		0)

struct drm_virtgpu_map {
	__u64 offset; /* use for mmap system call */
	__u32 handle;
	__u32 pad;
};
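
/*
 * Usage sketch (illustrative only, compiled out; not part of the UAPI):
 * DRM_IOCTL_VIRTGPU_MAP turns a GEM handle into a fake offset that is
 * only meaningful as an mmap(2) cookie on the same DRM fd.  Assumes
 * <sys/ioctl.h> and <sys/mman.h>; "fd" is an open virtio-gpu node and
 * "bo_size" is the caller-known object size.
 */
#if 0
static void *virtgpu_map_bo(int fd, __u32 handle, size_t bo_size)
{
	struct drm_virtgpu_map req = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &req))
		return NULL;
	/* Caller must check for MAP_FAILED as usual. */
	return mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, req.offset);
}
#endif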

#define VIRTGPU_EXECBUF_SYNCOBJ_RESET		0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
		VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
		0)
struct drm_virtgpu_execbuffer_syncobj {
	__u32 handle;
	__u32 flags;
	__u64 point;
};

/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
	__u32 flags;
	__u32 size;
	__u64 command; /* void* */
	__u64 bo_handles;
	__u32 num_bo_handles;
	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
	__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
	__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
	__u32 num_in_syncobjs;
	__u32 num_out_syncobjs;
	__u64 in_syncobjs;
	__u64 out_syncobjs;
};
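
/*
 * Usage sketch (illustrative only, compiled out): submit a
 * context-specific command stream and request an out-fence fd via
 * VIRTGPU_EXECBUF_FENCE_FD_OUT.  "cmds"/"cmds_size" and the "bos"
 * handle array are caller-provided assumptions; assumes <sys/ioctl.h>
 * and <stdint.h>.
 */
#if 0
static int virtgpu_submit(int fd, const void *cmds, __u32 cmds_size,
			  const __u32 *bos, __u32 num_bos, int *out_fence)
{
	struct drm_virtgpu_execbuffer eb = {
		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
		.size = cmds_size,
		.command = (__u64)(uintptr_t)cmds,
		.bo_handles = (__u64)(uintptr_t)bos,
		.num_bo_handles = num_bos,
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb))
		return -1;
	*out_fence = eb.fence_fd; /* signalled once the host has executed */
	return 0;
}
#endif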

#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */

struct drm_virtgpu_getparam {
	__u64 param;
	__u64 value;
};
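
/*
 * Usage sketch (illustrative only, compiled out): probe an optional
 * feature.  "value" here is a user pointer the kernel writes the result
 * through; an ioctl error is best treated as "feature absent".
 */
#if 0
static int virtgpu_has_blob_resources(int fd)
{
	__u64 value = 0;
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_RESOURCE_BLOB,
		.value = (__u64)(uintptr_t)&value,
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && value != 0;
}
#endif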

/* NO_BO flags? NO resource flag? */
/* resource flag for y_0_top */
struct drm_virtgpu_resource_create {
	__u32 target;
	__u32 format;
	__u32 bind;
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 array_size;
	__u32 last_level;
	__u32 nr_samples;
	__u32 flags;
	__u32 bo_handle; /* if this is set - recreate a new resource attached to this bo? */
	__u32 res_handle;  /* returned by kernel */
	__u32 size;        /* validate transfer in the host */
	__u32 stride;      /* validate transfer in the host */
};
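
/*
 * Usage sketch (illustrative only, compiled out): create a 2D resource.
 * The target/format/bind values come from the virgl protocol headers,
 * not from this file; the literals below are assumptions for
 * illustration only.
 */
#if 0
static int virtgpu_create_2d(int fd, __u32 width, __u32 height, __u32 *bo)
{
	struct drm_virtgpu_resource_create rc = {
		.target = 2,		/* PIPE_TEXTURE_2D (virgl, assumed) */
		.format = 1,		/* B8G8R8A8_UNORM (virgl, assumed) */
		.bind = (1 << 1),	/* render target (virgl, assumed) */
		.width = width,
		.height = height,
		.depth = 1,
		.array_size = 1,
		.size = width * height * 4,
		.stride = width * 4,
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc))
		return -1;
	*bo = rc.bo_handle; /* GEM handle created for the new resource */
	return 0;
}
#endif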

struct drm_virtgpu_resource_info {
	__u32 bo_handle;
	__u32 res_handle;
	__u32 size;
	__u32 blob_mem;
};
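
/*
 * Usage sketch (illustrative only, compiled out): look up the host
 * resource id behind a GEM handle, e.g. after importing a dma-buf.
 */
#if 0
static int virtgpu_query_res(int fd, __u32 bo, __u32 *res_handle)
{
	struct drm_virtgpu_resource_info info = { .bo_handle = bo };

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info))
		return -1;
	*res_handle = info.res_handle;
	return 0;
}
#endif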

/* Box origin (x, y, z) and extent (w, h, d) of a transfer region. */
struct drm_virtgpu_3d_box {
	__u32 x;
	__u32 y;
	__u32 z;
	__u32 w;
	__u32 h;
	__u32 d;
};

struct drm_virtgpu_3d_transfer_to_host {
	__u32 bo_handle;
	struct drm_virtgpu_3d_box box;
	__u32 level;
	__u32 offset;
	__u32 stride;
	__u32 layer_stride;
};

struct drm_virtgpu_3d_transfer_from_host {
	__u32 bo_handle;
	struct drm_virtgpu_3d_box box;
	__u32 level;
	__u32 offset;
	__u32 stride;
	__u32 layer_stride;
};
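
/*
 * Usage sketch (illustrative only, compiled out): queue an upload of a
 * width x height region from guest memory to the host copy of the
 * resource.  The transfer is asynchronous; pair it with
 * DRM_IOCTL_VIRTGPU_WAIT (below) before depending on the result.
 */
#if 0
static int virtgpu_upload(int fd, __u32 bo, __u32 width, __u32 height)
{
	struct drm_virtgpu_3d_transfer_to_host xfer = {
		.bo_handle = bo,
		.box = { .w = width, .h = height, .d = 1 },
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
}
#endif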

#define VIRTGPU_WAIT_NOWAIT 1 /* poll instead of blocking */
struct drm_virtgpu_3d_wait {
	__u32 handle; /* 0 is an invalid handle */
	__u32 flags;
};
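
/*
 * Usage sketch (illustrative only, compiled out): wait for pending
 * operations on a buffer object.  With VIRTGPU_WAIT_NOWAIT the ioctl
 * fails with errno EBUSY instead of blocking.
 */
#if 0
static int virtgpu_wait_bo(int fd, __u32 bo, int nonblock)
{
	struct drm_virtgpu_3d_wait wait = {
		.handle = bo,
		.flags = nonblock ? VIRTGPU_WAIT_NOWAIT : 0,
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
}
#endif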

#define VIRTGPU_DRM_CAPSET_VIRGL 1
#define VIRTGPU_DRM_CAPSET_VIRGL2 2
#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTGPU_DRM_CAPSET_VENUS 4
#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
#define VIRTGPU_DRM_CAPSET_DRM 6
struct drm_virtgpu_get_caps {
	__u32 cap_set_id;
	__u32 cap_set_ver;
	__u64 addr;
	__u32 size;
	__u32 pad;
};
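
/*
 * Usage sketch (illustrative only, compiled out): copy a capability set
 * into a caller-supplied buffer.  The kernel copies at most "size"
 * bytes; the expected layout and size for a given cap_set_id come from
 * the corresponding protocol documentation, not from this header.
 */
#if 0
static int virtgpu_get_caps_buf(int fd, __u32 set_id, void *buf, __u32 size)
{
	struct drm_virtgpu_get_caps caps = {
		.cap_set_id = set_id,	/* e.g. VIRTGPU_DRM_CAPSET_VENUS */
		.cap_set_ver = 0,
		.addr = (__u64)(uintptr_t)buf,
		.size = size,
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &caps);
}
#endif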

struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_BLOB_MEM_GUEST             0x0001
#define VIRTGPU_BLOB_MEM_HOST3D            0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST      0x0003

#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
	/* zero is invalid blob_mem */
	__u32 blob_mem;
	__u32 blob_flags;
	__u32 bo_handle;
	__u32 res_handle;
	__u64 size;

	/*
	 * Used with 3D contexts for VIRTGPU_BLOB_MEM_HOST3D_GUEST and
	 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, these fields must be zero.
	 */
	__u32 pad;
	__u32 cmd_size;
	__u64 cmd;
	__u64 blob_id;
};
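
/*
 * Usage sketch (illustrative only, compiled out): create a mappable
 * guest-memory blob.  VIRTGPU_BLOB_MEM_GUEST needs no host command, so
 * cmd/cmd_size/blob_id stay zero here.
 */
#if 0
static int virtgpu_create_guest_blob(int fd, __u64 size, __u32 *bo)
{
	struct drm_virtgpu_resource_create_blob blob = {
		.blob_mem = VIRTGPU_BLOB_MEM_GUEST,
		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
		.size = size,
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob))
		return -1;
	*bo = blob.bo_handle; /* filled in by the kernel on success */
	return 0;
}
#endif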

#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID       0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS       0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME      0x0004
struct drm_virtgpu_context_set_param {
	__u64 param;
	__u64 value;
};

struct drm_virtgpu_context_init {
	__u32 num_params;
	__u32 pad;

	/* pointer to drm_virtgpu_context_set_param array */
	__u64 ctx_set_params;
};
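
/*
 * Usage sketch (illustrative only, compiled out): initialize the fd's
 * context for a given capability set before the first submission.
 * Initializing twice on one fd fails, since the context already exists.
 */
#if 0
static int virtgpu_init_context(int fd, __u64 capset_id)
{
	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = capset_id },
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 1 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 2,
		.ctx_set_params = (__u64)(uintptr_t)params,
	};

	return ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}
#endif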

/*
 * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
 * effect.  The event size is sizeof(drm_event), since there is no additional
 * payload.
 */
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
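
/*
 * Usage sketch (illustrative only, compiled out): fence events arrive
 * through read(2) on the DRM fd (assumes <unistd.h>).  struct drm_event
 * comes from drm.h, included above; this event carries no payload, so
 * reading sizeof(struct drm_event) per event is sufficient.
 */
#if 0
static int virtgpu_drain_fence_events(int fd)
{
	struct drm_event ev;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == VIRTGPU_EVENT_FENCE_SIGNALED)
			return 1; /* at least one ring signalled */
	}
	return 0;
}
#endif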

#define DRM_IOCTL_VIRTGPU_MAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)

#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
		struct drm_virtgpu_execbuffer)

#define DRM_IOCTL_VIRTGPU_GETPARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
		struct drm_virtgpu_getparam)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE,	\
		struct drm_virtgpu_resource_create)

#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
		 struct drm_virtgpu_resource_info)

#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST,	\
		struct drm_virtgpu_3d_transfer_from_host)

#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST,	\
		struct drm_virtgpu_3d_transfer_to_host)

#define DRM_IOCTL_VIRTGPU_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT,	\
		struct drm_virtgpu_3d_wait)

#define DRM_IOCTL_VIRTGPU_GET_CAPS \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
	struct drm_virtgpu_get_caps)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB,	\
		struct drm_virtgpu_resource_create_blob)

#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT,		\
		struct drm_virtgpu_context_init)

#if defined(__cplusplus)
}
#endif

#endif