xref: /linux/include/uapi/drm/v3d_drm.h (revision e7b2b108cdeab76a7e7324459e50b0c1214c0386)
1 /*
2  * Copyright © 2014-2018 Broadcom
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #ifndef _V3D_DRM_H_
25 #define _V3D_DRM_H_
26 
27 #include "drm.h"
28 
29 #if defined(__cplusplus)
30 extern "C" {
31 #endif
32 
33 #define DRM_V3D_SUBMIT_CL                         0x00 /* ioctl function numbers, offset from DRM_COMMAND_BASE below */
34 #define DRM_V3D_WAIT_BO                           0x01
35 #define DRM_V3D_CREATE_BO                         0x02
36 #define DRM_V3D_MMAP_BO                           0x03
37 #define DRM_V3D_GET_PARAM                         0x04
38 #define DRM_V3D_GET_BO_OFFSET                     0x05
39 #define DRM_V3D_SUBMIT_TFU                        0x06
40 #define DRM_V3D_SUBMIT_CSD                        0x07
41 #define DRM_V3D_PERFMON_CREATE                    0x08
42 #define DRM_V3D_PERFMON_DESTROY                   0x09
43 #define DRM_V3D_PERFMON_GET_VALUES                0x0a
44 #define DRM_V3D_SUBMIT_CPU                        0x0b
45 
46 #define DRM_IOCTL_V3D_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
47 #define DRM_IOCTL_V3D_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
48 #define DRM_IOCTL_V3D_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
49 #define DRM_IOCTL_V3D_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
50 #define DRM_IOCTL_V3D_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
51 #define DRM_IOCTL_V3D_GET_BO_OFFSET       DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
52 #define DRM_IOCTL_V3D_SUBMIT_TFU          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
53 #define DRM_IOCTL_V3D_SUBMIT_CSD          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
54 #define DRM_IOCTL_V3D_PERFMON_CREATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \
55 						   struct drm_v3d_perfmon_create)
56 #define DRM_IOCTL_V3D_PERFMON_DESTROY     DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \
57 						   struct drm_v3d_perfmon_destroy)
58 #define DRM_IOCTL_V3D_PERFMON_GET_VALUES  DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
59 						   struct drm_v3d_perfmon_get_values)
60 #define DRM_IOCTL_V3D_SUBMIT_CPU          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
61 
62 #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE             0x01 /* flag values for the submit ioctls' "flags" fields */
63 #define DRM_V3D_SUBMIT_EXTENSION		  0x02
64 
65 /* struct drm_v3d_extension - ioctl extensions
66  *
67  * Linked-list of generic extensions where the id identify which struct is
68  * pointed by ext_data. Therefore, DRM_V3D_EXT_ID_* is used on id to identify
69  * the extension type.
70  */
71 struct drm_v3d_extension {
72 	__u64 next; /* user pointer to the next extension in the list */
73 	__u32 id; /* one of DRM_V3D_EXT_ID_* below */
74 #define DRM_V3D_EXT_ID_MULTI_SYNC			0x01
75 #define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD		0x02
76 #define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY		0x03
77 #define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY	0x04
78 #define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY	0x05
79 #define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY	0x06
80 #define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY	0x07
81 	__u32 flags; /* mbz */
82 };
83 
84 /* struct drm_v3d_sem - wait/signal semaphore
85  *
86  * If binary semaphore, it only takes syncobj handle and ignores flags and
87  * point fields. Point is defined for timeline syncobj feature.
88  */
89 struct drm_v3d_sem {
90 	__u32 handle; /* syncobj handle */
91 	/* rsv below, for future uses */
92 	__u32 flags;
93 	__u64 point;  /* for timeline sem support */
94 	__u64 mbz[2]; /* must be zero, rsv */
95 };
96 
97 /* Enum for each of the V3D queues. */
98 enum v3d_queue {
99 	V3D_BIN,	/* binner command lists (struct drm_v3d_submit_cl) */
100 	V3D_RENDER,	/* render command lists (struct drm_v3d_submit_cl) */
101 	V3D_TFU,	/* struct drm_v3d_submit_tfu jobs */
102 	V3D_CSD,	/* struct drm_v3d_submit_csd jobs */
103 	V3D_CACHE_CLEAN,
104 	V3D_CPU,	/* struct drm_v3d_submit_cpu jobs */
105 };
106 
107 /**
108  * struct drm_v3d_multi_sync - ioctl extension to add support for multiple
109  * syncobjs for command submission.
110  *
111  * When an extension of DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it points to
112  * this extension to define wait and signal dependencies, instead of single
113  * in/out sync entries on submitting commands. The field wait_stage is used to
114  * determine the stage to set wait dependencies.
115  */
116 struct drm_v3d_multi_sync {
117 	struct drm_v3d_extension base;
118 	/* User pointers to arrays of struct drm_v3d_sem: wait and signal semaphores */
119 	__u64 in_syncs;
120 	__u64 out_syncs;
121 
122 	/* Number of entries in each of the arrays above */
123 	__u32 in_sync_count;
124 	__u32 out_sync_count;
125 
126 	/* set the stage (enum v3d_queue) to set wait dependencies on */
127 	__u32 wait_stage;
128 
129 	__u32 pad; /* mbz */
130 };
131 
132 /**
133  * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
134  * engine.
135  *
136  * This asks the kernel to have the GPU execute an optional binner
137  * command list, and a render command list.
138  *
139  * The L1T, slice, L2C, L2T, and GCA caches will be flushed before
140  * each CL executes.  The VCD cache should be flushed (if necessary)
141  * by the submitted CLs.  The TLB writes are guaranteed to have been
142  * flushed by the time the render done IRQ happens, which is the
143  * trigger for out_sync.  Any dirtying of cachelines by the job (only
144  * possible using TMU writes) must be flushed by the caller using the
145  * DRM_V3D_SUBMIT_CL_FLUSH_CACHE flag.
146  */
147 struct drm_v3d_submit_cl {
148 	/* Pointer to the binner command list.
149 	 *
150 	 * This is the first set of commands executed, which runs the
151 	 * coordinate shader to determine where primitives land on the screen,
152 	 * then writes out the state updates and draw calls necessary per tile
153 	 * to the tile allocation BO.
154 	 *
155 	 * This BCL will block on any previous BCL submitted on the
156 	 * same FD, but not on any RCL or BCLs submitted by other
157 	 * clients -- that is left up to the submitter to control
158 	 * using in_sync_bcl if necessary.
159 	 */
160 	__u32 bcl_start;
161 
162 	/** End address of the BCL (first byte after the BCL) */
163 	__u32 bcl_end;
164 
165 	/* Offset of the render command list.
166 	 *
167 	 * This is the second set of commands executed, which will either
168 	 * execute the tiles that have been set up by the BCL, or a fixed set
169 	 * of tiles (in the case of RCL-only blits).
170 	 *
171 	 * This RCL will block on this submit's BCL, and any previous
172 	 * RCL submitted on the same FD, but not on any RCL or BCLs
173 	 * submitted by other clients -- that is left up to the
174 	 * submitter to control using in_sync_rcl if necessary.
175 	 */
176 	__u32 rcl_start;
177 
178 	/** End address of the RCL (first byte after the RCL) */
179 	__u32 rcl_end;
180 
181 	/** An optional sync object to wait on before starting the BCL. */
182 	__u32 in_sync_bcl;
183 	/** An optional sync object to wait on before starting the RCL. */
184 	__u32 in_sync_rcl;
185 	/** An optional sync object to place the completion fence in. */
186 	__u32 out_sync;
187 
188 	/* Offset of the tile alloc memory
189 	 *
190 	 * This is optional on V3D 3.3 (where the CL can set the value) but
191 	 * required on V3D 4.1.
192 	 */
193 	__u32 qma;
194 
195 	/** Size of the tile alloc memory. */
196 	__u32 qms;
197 
198 	/** Offset of the tile state data array. */
199 	__u32 qts;
200 
201 	/* Pointer to a u32 array of the BOs that are referenced by the job.
202 	 */
203 	__u64 bo_handles;
204 
205 	/* Number of BO handles passed in (size is that times 4). */
206 	__u32 bo_handle_count;
207 
208 	/* DRM_V3D_SUBMIT_* properties */
209 	__u32 flags;
210 
211 	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
212 	__u32 perfmon_id;
213 
214 	__u32 pad; /* mbz */
215 
216 	/* Pointer to an array of ioctl extensions (struct drm_v3d_extension) */
217 	__u64 extensions;
218 };
219 
220 /**
221  * struct drm_v3d_wait_bo - ioctl argument for waiting for
222  * completion of the last DRM_V3D_SUBMIT_CL on a BO.
223  *
224  * This is useful for cases where multiple processes might be
225  * rendering to a BO and you want to wait for all rendering to be
226  * completed.
227  */
228 struct drm_v3d_wait_bo {
229 	__u32 handle; /* GEM handle of the BO to wait on */
230 	__u32 pad; /* mbz */
231 	__u64 timeout_ns; /* wait timeout, in nanoseconds */
232 };
233 
234 /**
235  * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
236  *
237  * There are currently no values for the flags argument, but it may be
238  * used in a future extension.
239  */
240 struct drm_v3d_create_bo {
241 	__u32 size; /* size of the BO to create, in bytes */
242 	__u32 flags; /* no flags defined yet (see above) */
243 	/** Returned GEM handle for the BO. */
244 	__u32 handle;
245 	/**
246 	 * Returned offset for the BO in the V3D address space.  This offset
247 	 * is private to the DRM fd and is valid for the lifetime of the GEM
248 	 * handle.
249 	 *
250 	 * This offset value will always be nonzero, since various HW
251 	 * units treat 0 specially.
252 	 */
253 	__u32 offset;
254 };
255 
256 /**
257  * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
258  *
259  * This doesn't actually perform an mmap.  Instead, it returns the
260  * offset you need to use in an mmap on the DRM device node.  This
261  * means that tools like valgrind end up knowing about the mapped
262  * memory.
263  *
264  * There are currently no values for the flags argument, but it may be
265  * used in a future extension.
266  */
267 struct drm_v3d_mmap_bo {
268 	/** Handle for the object being mapped. */
269 	__u32 handle;
270 	__u32 flags; /* no flags defined yet (see above) */
271 	/** offset into the drm node to use for subsequent mmap call. */
272 	__u64 offset;
273 };
274 
275 enum drm_v3d_param {
276 	DRM_V3D_PARAM_V3D_UIFCFG,
277 	DRM_V3D_PARAM_V3D_HUB_IDENT1,
278 	DRM_V3D_PARAM_V3D_HUB_IDENT2,
279 	DRM_V3D_PARAM_V3D_HUB_IDENT3,
280 	DRM_V3D_PARAM_V3D_CORE0_IDENT0,
281 	DRM_V3D_PARAM_V3D_CORE0_IDENT1,
282 	DRM_V3D_PARAM_V3D_CORE0_IDENT2,
283 	DRM_V3D_PARAM_SUPPORTS_TFU,
284 	DRM_V3D_PARAM_SUPPORTS_CSD,
285 	DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
286 	DRM_V3D_PARAM_SUPPORTS_PERFMON,
287 	DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
288 	DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, /* append only: these enum values are uAPI (DRM_V3D_GET_PARAM) */
289 };
290 
291 struct drm_v3d_get_param {
292 	__u32 param; /* enum drm_v3d_param to query */
293 	__u32 pad; /* mbz */
294 	__u64 value; /* returned parameter value */
295 };
296 
297 /**
298  * Returns the offset for the BO in the V3D address space for this DRM fd.
299  * This is the same value returned by drm_v3d_create_bo, if that was called
300  * from this DRM fd.
301  */
302 struct drm_v3d_get_bo_offset {
303 	__u32 handle; /* GEM handle of the BO to look up */
304 	__u32 offset; /* returned offset in the V3D address space (see above) */
305 };
306 
307 struct drm_v3d_submit_tfu {
308 	__u32 icfg;
309 	__u32 iia;
310 	__u32 iis;
311 	__u32 ica;
312 	__u32 iua;
313 	__u32 ioa;
314 	__u32 ios;
315 	__u32 coef[4];
316 	/* First handle is the output BO, following are other inputs.
317 	 * 0 for unused.
318 	 */
319 	__u32 bo_handles[4];
320 	/* sync object to block on before running the TFU job.  Each TFU
321 	 * job will execute in the order submitted to its FD.  Synchronization
322 	 * against rendering jobs requires using sync objects.
323 	 */
324 	__u32 in_sync;
325 	/* Sync object to signal when the TFU job is done. */
326 	__u32 out_sync;
327 
328 	__u32 flags; /* DRM_V3D_SUBMIT_* properties */
329 
330 	/* Pointer to an array of ioctl extensions (struct drm_v3d_extension) */
331 	__u64 extensions;
332 
333 	struct {
334 		__u32 ioc;
335 		__u32 pad; /* mbz */
336 	} v71; /* NOTE(review): per the name, presumably V3D v7.1+ only — confirm against driver */
337 };
338 
339 /* Submits a compute shader for dispatch.  This job will block on any
340  * previous compute shaders submitted on this fd, and any other
341  * synchronization must be performed with in_sync/out_sync.
342  */
343 struct drm_v3d_submit_csd {
344 	__u32 cfg[7];
345 	__u32 coef[4];
346 
347 	/* Pointer to a u32 array of the BOs that are referenced by the job.
348 	 */
349 	__u64 bo_handles;
350 
351 	/* Number of BO handles passed in (size is that times 4). */
352 	__u32 bo_handle_count;
353 
354 	/* sync object to block on before running the CSD job.  Each
355 	 * CSD job will execute in the order submitted to its FD.
356 	 * Synchronization against rendering/TFU jobs or CSD from
357 	 * other fds requires using sync objects.
358 	 */
359 	__u32 in_sync;
360 	/* Sync object to signal when the CSD job is done. */
361 	__u32 out_sync;
362 
363 	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
364 	__u32 perfmon_id;
365 
366 	/* Pointer to an array of ioctl extensions (struct drm_v3d_extension) */
367 	__u64 extensions;
368 
369 	__u32 flags; /* DRM_V3D_SUBMIT_* properties */
370 
371 	__u32 pad; /* mbz */
372 };
373 
374 /**
375  * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
376  * indirect CSD
377  *
378  * When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it
379  * points to this extension to define an indirect CSD submission. It creates a
380  * CPU job linked to a CSD job. The CPU job waits for the indirect CSD
381  * dependencies and, once they are signaled, it updates the CSD job config
382  * before allowing the CSD job execution.
383  */
384 struct drm_v3d_indirect_csd {
385 	struct drm_v3d_extension base;
386 
387 	/* Indirect CSD */
388 	struct drm_v3d_submit_csd submit;
389 
390 	/* Handle of the indirect BO, that should be also attached to the
391 	 * indirect CSD.
392 	 */
393 	__u32 indirect;
394 
395 	/* Offset within the BO where the workgroup counts are stored */
396 	__u32 offset;
397 
398 	/* Workgroup size */
399 	__u32 wg_size;
400 
401 	/* Indices of the uniforms with the workgroup dispatch counts
402 	 * in the uniform stream. If the uniform rewrite is not needed,
403 	 * the offset must be 0xffffffff.
404 	 */
405 	__u32 wg_uniform_offsets[3];
406 };
407 
408 /**
409  * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
410  * a timestamp query
411  *
412  * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
413  * this extension to define a timestamp query submission. This CPU job will
414  * calculate the timestamp query and update the query value within the
415  * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
416  * query availability.
417  */
418 struct drm_v3d_timestamp_query {
419 	struct drm_v3d_extension base;
420 
421 	/* User pointer to array of queries' offsets within the timestamp BO for their value */
422 	__u64 offsets;
423 
424 	/* User pointer to array of timestamp's syncobjs to indicate its availability */
425 	__u64 syncs;
426 
427 	/* Number of queries */
428 	__u32 count;
429 
430 	/* mbz */
431 	__u32 pad;
432 };
433 
434 /**
435  * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
436  * reset timestamp queries
437  *
438  * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
439  * points to this extension to define a reset timestamp submission. This CPU
440  * job will reset the timestamp queries based on value offset of the first
441  * query. Moreover, it will reset the timestamp syncobj to reset query
442  * availability.
443  */
444 struct drm_v3d_reset_timestamp_query {
445 	struct drm_v3d_extension base;
446 
447 	/* User pointer to array of timestamp's syncobjs to indicate its availability */
448 	__u64 syncs;
449 
450 	/* Offset of the first query within the timestamp BO for its value */
451 	__u32 offset;
452 
453 	/* Number of queries */
454 	__u32 count;
455 };
456 
457 /**
458  * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
459  * query results to a buffer
460  *
461  * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
462  * points to this extension to define a copy timestamp query submission. This
463  * CPU job will copy the timestamp queries results to a BO with the offset
464  * and stride defined in the extension.
465  */
466 struct drm_v3d_copy_timestamp_query {
467 	struct drm_v3d_extension base;
468 
469 	/* Define if should write to buffer using 64 or 32 bits */
470 	__u8 do_64bit;
471 
472 	/* Define if it can write to buffer even if the query is not available */
473 	__u8 do_partial;
474 
475 	/* Define if it should write availability bit to buffer */
476 	__u8 availability_bit;
477 
478 	/* mbz */
479 	__u8 pad;
480 
481 	/* Offset of the buffer in the BO */
482 	__u32 offset;
483 
484 	/* Stride of the buffer in the BO */
485 	__u32 stride;
486 
487 	/* Number of queries */
488 	__u32 count;
489 
490 	/* User pointer to array of queries' offsets within the timestamp BO for their value */
491 	__u64 offsets;
492 
493 	/* User pointer to array of timestamp's syncobjs to indicate its availability */
494 	__u64 syncs;
495 };
496 
497 /**
498  * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
499  * reset performance queries
500  *
501  * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
502  * points to this extension to define a reset performance submission. This CPU
503  * job will reset the performance queries by resetting the values of the
504  * performance monitors. Moreover, it will reset the syncobj to reset query
505  * availability.
506  */
507 struct drm_v3d_reset_performance_query {
508 	struct drm_v3d_extension base;
509 
510 	/* User pointer to array of performance queries' syncobjs to indicate its availability */
511 	__u64 syncs;
512 
513 	/* Number of queries */
514 	__u32 count;
515 
516 	/* Number of performance monitors */
517 	__u32 nperfmons;
518 
519 	/* Array of u64 user-pointers that point to an array of kperfmon_ids */
520 	__u64 kperfmon_ids;
521 };
522 
523 /**
524  * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
525  * performance query results to a buffer
526  *
527  * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
528  * points to this extension to define a copy performance query submission. This
529  * CPU job will copy the performance queries results to a BO with the offset
530  * and stride defined in the extension.
531  */
532 struct drm_v3d_copy_performance_query {
533 	struct drm_v3d_extension base;
534 
535 	/* Define if should write to buffer using 64 or 32 bits */
536 	__u8 do_64bit;
537 
538 	/* Define if it can write to buffer even if the query is not available */
539 	__u8 do_partial;
540 
541 	/* Define if it should write availability bit to buffer */
542 	__u8 availability_bit;
543 
544 	/* mbz */
545 	__u8 pad;
546 
547 	/* Offset of the buffer in the BO */
548 	__u32 offset;
549 
550 	/* Stride of the buffer in the BO */
551 	__u32 stride;
552 
553 	/* Number of performance monitors */
554 	__u32 nperfmons;
555 
556 	/* Number of performance counters related to this query pool */
557 	__u32 ncounters;
558 
559 	/* Number of queries */
560 	__u32 count;
561 
562 	/* User pointer to array of performance queries' syncobjs to indicate its availability */
563 	__u64 syncs;
564 
565 	/* Array of u64 user-pointers that point to an array of kperfmon_ids */
566 	__u64 kperfmon_ids;
567 };
568 
569 struct drm_v3d_submit_cpu {
570 	/* Pointer to a u32 array of the BOs that are referenced by the job.
571 	 *
572 	 * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
573 	 * that contains the workgroup counts.
574 	 *
575 	 * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
576 	 * that will contain the timestamp.
577 	 *
578 	 * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
579 	 * one BO, that contains the timestamp.
580 	 *
581 	 * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
582 	 * BOs. The first is the BO where the timestamp queries will be written
583 	 * to. The second is the BO that contains the timestamp.
584 	 *
585 	 * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
586 	 * BOs.
587 	 *
588 	 * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
589 	 * BO, where the performance queries will be written.
590 	 */
591 	__u64 bo_handles;
592 
593 	/* Number of BO handles passed in (size is that times 4). */
594 	__u32 bo_handle_count;
595 
596 	__u32 flags;
597 
598 	/* Pointer to an array of ioctl extensions (struct drm_v3d_extension) */
599 	__u64 extensions;
600 };
601 
602 enum { /* performance counter indices; presumably used in drm_v3d_perfmon_create.counters[] — confirm against driver */
603 	V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
604 	V3D_PERFCNT_FEP_VALID_PRIMS,
605 	V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS,
606 	V3D_PERFCNT_FEP_VALID_QUADS,
607 	V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL,
608 	V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL,
609 	V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS,
610 	V3D_PERFCNT_TLB_QUADS_ZERO_COV,
611 	V3D_PERFCNT_TLB_QUADS_NONZERO_COV,
612 	V3D_PERFCNT_TLB_QUADS_WRITTEN,
613 	V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD,
614 	V3D_PERFCNT_PTB_PRIM_CLIP,
615 	V3D_PERFCNT_PTB_PRIM_REV,
616 	V3D_PERFCNT_QPU_IDLE_CYCLES,
617 	V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER,
618 	V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG,
619 	V3D_PERFCNT_QPU_CYCLES_VALID_INSTR,
620 	V3D_PERFCNT_QPU_CYCLES_TMU_STALL,
621 	V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL,
622 	V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL,
623 	V3D_PERFCNT_QPU_IC_HIT,
624 	V3D_PERFCNT_QPU_IC_MISS,
625 	V3D_PERFCNT_QPU_UC_HIT,
626 	V3D_PERFCNT_QPU_UC_MISS,
627 	V3D_PERFCNT_TMU_TCACHE_ACCESS,
628 	V3D_PERFCNT_TMU_TCACHE_MISS,
629 	V3D_PERFCNT_VPM_VDW_STALL,
630 	V3D_PERFCNT_VPM_VCD_STALL,
631 	V3D_PERFCNT_BIN_ACTIVE,
632 	V3D_PERFCNT_RDR_ACTIVE,
633 	V3D_PERFCNT_L2T_HITS,
634 	V3D_PERFCNT_L2T_MISSES,
635 	V3D_PERFCNT_CYCLE_COUNT,
636 	V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER,
637 	V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT,
638 	V3D_PERFCNT_PTB_PRIMS_BINNED,
639 	V3D_PERFCNT_AXI_WRITES_WATCH_0,
640 	V3D_PERFCNT_AXI_READS_WATCH_0,
641 	V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0,
642 	V3D_PERFCNT_AXI_READ_STALLS_WATCH_0,
643 	V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0,
644 	V3D_PERFCNT_AXI_READ_BYTES_WATCH_0,
645 	V3D_PERFCNT_AXI_WRITES_WATCH_1,
646 	V3D_PERFCNT_AXI_READS_WATCH_1,
647 	V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1,
648 	V3D_PERFCNT_AXI_READ_STALLS_WATCH_1,
649 	V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1,
650 	V3D_PERFCNT_AXI_READ_BYTES_WATCH_1,
651 	V3D_PERFCNT_TLB_PARTIAL_QUADS,
652 	V3D_PERFCNT_TMU_CONFIG_ACCESSES,
653 	V3D_PERFCNT_L2T_NO_ID_STALL,
654 	V3D_PERFCNT_L2T_COM_QUE_STALL,
655 	V3D_PERFCNT_L2T_TMU_WRITES,
656 	V3D_PERFCNT_TMU_ACTIVE_CYCLES,
657 	V3D_PERFCNT_TMU_STALLED_CYCLES,
658 	V3D_PERFCNT_CLE_ACTIVE,
659 	V3D_PERFCNT_L2T_TMU_READS,
660 	V3D_PERFCNT_L2T_CLE_READS,
661 	V3D_PERFCNT_L2T_VCD_READS,
662 	V3D_PERFCNT_L2T_TMUCFG_READS,
663 	V3D_PERFCNT_L2T_SLC0_READS,
664 	V3D_PERFCNT_L2T_SLC1_READS,
665 	V3D_PERFCNT_L2T_SLC2_READS,
666 	V3D_PERFCNT_L2T_TMU_W_MISSES,
667 	V3D_PERFCNT_L2T_TMU_R_MISSES,
668 	V3D_PERFCNT_L2T_CLE_MISSES,
669 	V3D_PERFCNT_L2T_VCD_MISSES,
670 	V3D_PERFCNT_L2T_TMUCFG_MISSES,
671 	V3D_PERFCNT_L2T_SLC0_MISSES,
672 	V3D_PERFCNT_L2T_SLC1_MISSES,
673 	V3D_PERFCNT_L2T_SLC2_MISSES,
674 	V3D_PERFCNT_CORE_MEM_WRITES,
675 	V3D_PERFCNT_L2T_MEM_WRITES,
676 	V3D_PERFCNT_PTB_MEM_WRITES,
677 	V3D_PERFCNT_TLB_MEM_WRITES,
678 	V3D_PERFCNT_CORE_MEM_READS,
679 	V3D_PERFCNT_L2T_MEM_READS,
680 	V3D_PERFCNT_PTB_MEM_READS,
681 	V3D_PERFCNT_PSE_MEM_READS,
682 	V3D_PERFCNT_TLB_MEM_READS,
683 	V3D_PERFCNT_GMP_MEM_READS,
684 	V3D_PERFCNT_PTB_W_MEM_WORDS,
685 	V3D_PERFCNT_TLB_W_MEM_WORDS,
686 	V3D_PERFCNT_PSE_R_MEM_WORDS,
687 	V3D_PERFCNT_TLB_R_MEM_WORDS,
688 	V3D_PERFCNT_TMU_MRU_HITS,
689 	V3D_PERFCNT_COMPUTE_ACTIVE,
690 	V3D_PERFCNT_NUM, /* not a counter: number of entries above */
691 };
692 
693 #define DRM_V3D_MAX_PERF_COUNTERS                 32
694 
695 struct drm_v3d_perfmon_create {
696 	__u32 id; /* ID of the perfmon; presumably returned by the kernel — confirm against driver */
697 	__u32 ncounters; /* number of valid entries in counters[] */
698 	__u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; /* V3D_PERFCNT_* values to track */
699 };
700 
701 struct drm_v3d_perfmon_destroy {
702 	__u32 id; /* ID of the perfmon to destroy */
703 };
704 
705 /*
706  * Returns the values of the performance counters tracked by this
707  * perfmon (as an array of ncounters u64 values).
708  *
709  * No implicit synchronization is performed, so the user has to
710  * guarantee that any jobs using this perfmon have already been
711  * completed (probably by blocking on the seqno returned by the
712  * last exec that used the perfmon).
713  */
714 struct drm_v3d_perfmon_get_values {
715 	__u32 id; /* ID of the perfmon to read */
716 	__u32 pad; /* mbz */
717 	__u64 values_ptr; /* user pointer to an array of ncounters u64 values (see above) */
718 };
719 
720 #if defined(__cplusplus)
721 }
722 #endif
723 
724 #endif /* _V3D_DRM_H_ */
725