xref: /linux/include/uapi/drm/vc4_drm.h (revision 8c749ce93ee69e789e46b3be98de9e0cbfcf8ed8)
/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _UAPI_VC4_DRM_H_
#define _UAPI_VC4_DRM_H_

#include "drm.h"

#define DRM_VC4_SUBMIT_CL                         0x00
#define DRM_VC4_WAIT_SEQNO                        0x01
#define DRM_VC4_WAIT_BO                           0x02
#define DRM_VC4_CREATE_BO                         0x03
#define DRM_VC4_MMAP_BO                           0x04
#define DRM_VC4_CREATE_SHADER_BO                  0x05
#define DRM_VC4_GET_HANG_STATE                    0x06

#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)

struct drm_vc4_submit_rcl_surface {
	__u32 hindex; /* Handle index, or ~0 if not present. */
	__u32 offset; /* Offset to start of buffer. */
	/*
	 * Bits for either render config (color_write) or load/store packet.
	 * Bits should all be 0 for MSAA load/stores.
	 */
	__u16 bits;

#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES		(1 << 0)
	__u16 flags;
};
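
/* Illustrative sketch (not part of the UAPI): how userspace might mark an
 * RCL surface as absent versus pointing it at an entry in the submit's
 * bo_handles[] array.  `fb_bo_index` is a hypothetical name, and the `bits`
 * value is a hardware packet field left 0 here purely as a placeholder.
 *
 *	struct drm_vc4_submit_rcl_surface zs_read = {
 *		.hindex = ~0,			// surface not present
 *	};
 *	struct drm_vc4_submit_rcl_surface color_write = {
 *		.hindex = fb_bo_index,		// index into bo_handles[]
 *		.offset = 0,
 *		.bits = 0,			// render config bits (placeholder)
 *	};
 */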

/**
 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * Drivers typically use GPU BOs to store batchbuffers / command lists and
 * their associated state.  However, because the VC4 lacks an MMU, we have to
 * do validation of memory accesses by the GPU commands.  If we were to store
 * our commands in BOs, we'd need to do uncached readback from them to do the
 * validation process, which is too expensive.  Instead, userspace accumulates
 * commands and associated state in plain memory, then the kernel copies the
 * data to its own address space, and then validates and stores it in a GPU
 * BO.
 */
struct drm_vc4_submit_cl {
	/* Pointer to the binner command list.
	 *
	 * This is the first set of commands executed, which runs the
	 * coordinate shader to determine where primitives land on the screen,
	 * then writes out the state updates and draw calls necessary per tile
	 * to the tile allocation BO.
	 */
	__u64 bin_cl;

	/* Pointer to the shader records.
	 *
	 * Shader records are the structures read by the hardware that contain
	 * pointers to uniforms, shaders, and vertex attributes.  The
	 * reference to the shader record has enough information to determine
	 * how many pointers are necessary (fixed number for shaders/uniforms,
	 * and an attribute count), so those BO indices into bo_handles are
	 * just stored as __u32s before each shader record passed in.
	 */
	__u64 shader_rec;

	/* Pointer to uniform data and texture handles for the textures
	 * referenced by the shader.
	 *
	 * For each shader state record, there is a set of uniform data in the
	 * order referenced by the record (FS, VS, then CS).  Each set of
	 * uniform data has a __u32 index into bo_handles per texture
	 * sample operation, in the order the QPU_W_TMUn_S writes appear in
	 * the program.  Following the texture BO handle indices is the actual
	 * uniform data.
	 *
	 * The individual uniform state blocks don't have sizes passed in,
	 * because the kernel has to determine the sizes anyway during shader
	 * code validation.
	 */
	__u64 uniforms;
	__u64 bo_handles;

	/* Size in bytes of the binner command list. */
	__u32 bin_cl_size;
	/* Size in bytes of the set of shader records. */
	__u32 shader_rec_size;
	/* Number of shader records.
	 *
	 * This could just be computed from the contents of shader_records and
	 * the address bits of references to them from the bin CL, but it
	 * keeps the kernel from having to resize some allocations it makes.
	 */
	__u32 shader_rec_count;
	/* Size in bytes of the uniform state. */
	__u32 uniforms_size;

	/* Number of BO handles passed in (size is that times 4). */
	__u32 bo_handle_count;

	/* RCL setup: */
	__u16 width;
	__u16 height;
	__u8 min_x_tile;
	__u8 min_y_tile;
	__u8 max_x_tile;
	__u8 max_y_tile;
	struct drm_vc4_submit_rcl_surface color_read;
	struct drm_vc4_submit_rcl_surface color_write;
	struct drm_vc4_submit_rcl_surface zs_read;
	struct drm_vc4_submit_rcl_surface zs_write;
	struct drm_vc4_submit_rcl_surface msaa_color_write;
	struct drm_vc4_submit_rcl_surface msaa_zs_write;
	__u32 clear_color[2];
	__u32 clear_z;
	__u8 clear_s;

	__u32 pad:24;

#define VC4_SUBMIT_CL_USE_CLEAR_COLOR			(1 << 0)
	__u32 flags;

	/* Returned value of the seqno of this render job (for the
	 * wait ioctl).
	 */
	__u64 seqno;
};
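
/* Illustrative sketch (not part of the UAPI): a minimal submit path, assuming
 * libdrm's drmIoctl() and userspace-built bin CL / shader record / uniform
 * buffers.  Names such as bin_cl_buf, fb_width, and fd are hypothetical, and
 * the per-surface RCL and tile-range setup is elided.  Pointers are passed as
 * __u64 via a uintptr_t cast, as is usual for DRM ioctl arguments.
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl_buf,
 *		.bin_cl_size = bin_cl_size,
 *		.shader_rec = (uintptr_t)shader_rec_buf,
 *		.shader_rec_size = shader_rec_size,
 *		.shader_rec_count = shader_rec_count,
 *		.uniforms = (uintptr_t)uniforms_buf,
 *		.uniforms_size = uniforms_size,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *		.width = fb_width,
 *		.height = fb_height,
 *		.clear_color = { 0xff0000ff, 0xff0000ff },
 *		.flags = VC4_SUBMIT_CL_USE_CLEAR_COLOR,	// honor clear_color above
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) == 0)
 *		last_seqno = submit.seqno;	// for DRM_IOCTL_VC4_WAIT_SEQNO
 */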

/**
 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
 *
 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
 * block, just return the status."
 */
struct drm_vc4_wait_seqno {
	__u64 seqno;
	__u64 timeout_ns;
};
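
/* Illustrative sketch (not part of the UAPI), assuming libdrm's drmIoctl():
 * wait up to one second for the job whose seqno was returned by
 * DRM_IOCTL_VC4_SUBMIT_CL.  `fd` and `last_seqno` are hypothetical names.
 *
 *	struct drm_vc4_wait_seqno wait = {
 *		.seqno = last_seqno,
 *		.timeout_ns = 1000000000ull,	// 0 would just report status
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 */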

/**
 * struct drm_vc4_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_vc4_wait_bo {
	__u32 handle;
	__u32 pad;
	__u64 timeout_ns;
};
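
/* Illustrative sketch (not part of the UAPI), assuming libdrm's drmIoctl():
 * wait for all outstanding rendering to a (possibly shared) BO before reading
 * it back.  `fd` and `bo_handle` are hypothetical names.
 *
 *	struct drm_vc4_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = 1000000000ull,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
 */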

/**
 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_create_bo {
	__u32 size;
	__u32 flags;
	/** Returned GEM handle for the BO. */
	__u32 handle;
	__u32 pad;
};
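
/* Illustrative sketch (not part of the UAPI), assuming libdrm's drmIoctl():
 * create a 64 KiB BO; flags is left 0 since no flag values are defined yet.
 * `fd` and `bo_handle` are hypothetical names.
 *
 *	struct drm_vc4_create_bo create = {
 *		.size = 64 * 1024,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create) == 0)
 *		bo_handle = create.handle;	// GEM handle returned by the kernel
 */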

/**
 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
 *
 * This doesn't actually perform an mmap.  Instead, it returns the
 * offset you need to use in an mmap on the DRM device node.  This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_mmap_bo {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 flags;
	/** Offset into the drm node to use for subsequent mmap call. */
	__u64 offset;
};
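
/* Illustrative sketch (not part of the UAPI), assuming libdrm's drmIoctl()
 * and <sys/mman.h>: ask the kernel for the mmap offset, then mmap the DRM
 * device fd at that offset to get a CPU pointer to the BO.  `fd`, `bo_handle`
 * and `bo_size` are hypothetical names.
 *
 *	struct drm_vc4_mmap_bo map = {
 *		.handle = bo_handle,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */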

/**
 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
 * shader BOs.
 *
 * Since allowing a shader to be overwritten while it is also being
 * executed would allow privilege escalation, shaders must be created
 * using this ioctl, and they can't be mmapped later.
 */
struct drm_vc4_create_shader_bo {
	/* Size of the data argument. */
	__u32 size;
	/* Flags, currently must be 0. */
	__u32 flags;

	/* Pointer to the data. */
	__u64 data;

	/** Returned GEM handle for the BO. */
	__u32 handle;
	/* Pad, must be 0. */
	__u32 pad;
};
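
/* Illustrative sketch (not part of the UAPI), assuming libdrm's drmIoctl():
 * upload shader code for validation; the resulting BO cannot be mmapped
 * later.  `fd`, `shader_code`, `shader_code_size` and `shader_handle` are
 * hypothetical names.
 *
 *	struct drm_vc4_create_shader_bo create = {
 *		.size = shader_code_size,		// bytes pointed to by data
 *		.data = (uintptr_t)shader_code,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &create) == 0)
 *		shader_handle = create.handle;
 */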

struct drm_vc4_get_hang_state_bo {
	__u32 handle;
	__u32 paddr;
	__u32 size;
	__u32 pad;
};

/**
 * struct drm_vc4_get_hang_state - ioctl argument for collecting state
 * from a GPU hang for analysis.
 */
struct drm_vc4_get_hang_state {
	/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
	__u64 bo;
	/**
	 * On input, the size of the bo array.  Output is the number
	 * of bos to be returned.
	 */
	__u32 bo_count;

	__u32 start_bin, start_render;

	__u32 ct0ca, ct0ea;
	__u32 ct1ca, ct1ea;
	__u32 ct0cs, ct1cs;
	__u32 ct0ra0, ct1ra0;

	__u32 bpca, bpcs;
	__u32 bpoa, bpos;

	__u32 vpmbase;

	__u32 dbge;
	__u32 fdbgo;
	__u32 fdbgb;
	__u32 fdbgr;
	__u32 fdbgs;
	__u32 errstat;

	/* Pad that we may save more registers into in the future. */
	__u32 pad[16];
};
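
/* Illustrative sketch (not part of the UAPI), assuming libdrm's drmIoctl()
 * and <stdlib.h>: a plausible two-call pattern based on bo_count being an
 * in/out field, first querying how many BOs were involved in the hang, then
 * fetching their descriptions.  `fd` is a hypothetical name and the exact
 * resize semantics are an assumption.
 *
 *	struct drm_vc4_get_hang_state state = { .bo_count = 0 };
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &state);
 *	struct drm_vc4_get_hang_state_bo *bos =
 *		calloc(state.bo_count, sizeof(*bos));
 *	state.bo = (uintptr_t)bos;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &state);
 */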

#endif /* _UAPI_VC4_DRM_H_ */