1 /* SPDX-License-Identifier: MIT */
2 /* Copyright (C) 2023 Collabora ltd. */
3 #ifndef _PANTHOR_DRM_H_
4 #define _PANTHOR_DRM_H_
5 
6 #include "drm.h"
7 
8 #if defined(__cplusplus)
9 extern "C" {
10 #endif
11 
12 /**
13  * DOC: Introduction
14  *
15  * This documentation describes the Panthor IOCTLs.
16  *
17  * Just a few generic rules about the data passed to the Panthor IOCTLs:
18  *
19  * - Structures must be aligned on 64-bit/8-byte. If the object is not
20  *   naturally aligned, a padding field must be added.
21  * - Fields must be explicitly aligned to their natural type alignment with
22  *   pad[0..N] fields.
23  * - All padding fields will be checked by the driver to make sure they are
24  *   zeroed.
25  * - Flags can be added, but not removed/replaced.
26  * - New fields can be added to the main structures (the structures
27  *   directly passed to the ioctl). Those fields can be added at the end of
28  *   the structure, or replace existing padding fields. Any new field
29  *   must preserve the behavior that existed before it was added when a
30  *   value of zero is passed.
31  * - New fields can be added to indirect objects (objects pointed by the
32  *   main structure), iff those objects are passed a size to reflect the
33  *   size known by the userspace driver (see drm_panthor_obj_array::stride
34  *   or drm_panthor_dev_query::size).
35  * - If the kernel driver is too old to know some fields, those will be
36  *   ignored if zero, and otherwise rejected (and so will be zero on output).
37  * - If userspace is too old to know some fields, those will be zeroed
38  *   (input) before the structure is parsed by the kernel driver.
39  * - Each new flag/field addition must come with a driver version update so
40  *   the userspace driver doesn't have to trial and error to know which
41  *   flags are supported.
42  * - Structures should not contain unions, as this would defeat the
43  *   extensibility of such structures.
44  * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
45  *   at the end of the drm_panthor_ioctl_id enum.
46  */
47 
48 /**
49  * DOC: MMIO regions exposed to userspace.
50  *
51  * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
52  *
53  * File offset for all MMIO regions being exposed to userspace. Don't use
54  * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
55  * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
56  * different offset on 32-bit and 64-bit systems.
57  *
58  * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
59  *
60  * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
61  * GPU cache flushing through CS instructions, but the flush reduction
62  * mechanism requires a flush_id. This flush_id could be queried with an
63  * ioctl, but Arm provides a well-isolated register page containing only this
64  * read-only register, so let's expose this page through a static mmap offset
65  * and allow direct mapping of this MMIO region so we can avoid the
66  * user <-> kernel round-trip.
67  */
68 #define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
69 #define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
70 #define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
71 						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
72 						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
73 #define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)
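
/*
 * Illustrative userspace sketch (not part of the uAPI): mapping the
 * LATEST_FLUSH_ID page through the static MMIO offset described above.
 * "fd" is assumed to be an open panthor render node, and the example assumes
 * a 64-bit off_t (e.g. _FILE_OFFSET_BITS=64 on 32-bit builds); error
 * handling is omitted.
 *
 *   long page_size = sysconf(_SC_PAGESIZE);
 *   void *flush_id_page = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *                              DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 *   // Read the flush ID directly, no ioctl round-trip needed.
 *   __u32 latest_flush = *(volatile const __u32 *)flush_id_page;
 */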
74 
75 /**
76  * DOC: IOCTL IDs
77  *
78  * enum drm_panthor_ioctl_id - IOCTL IDs
79  *
80  * Place new ioctls at the end, don't re-order, don't replace or remove entries.
81  *
82  * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
83  * definitions instead.
84  */
85 enum drm_panthor_ioctl_id {
86 	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
87 	DRM_PANTHOR_DEV_QUERY = 0,
88 
89 	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
90 	DRM_PANTHOR_VM_CREATE,
91 
92 	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
93 	DRM_PANTHOR_VM_DESTROY,
94 
95 	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
96 	DRM_PANTHOR_VM_BIND,
97 
98 	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
99 	DRM_PANTHOR_VM_GET_STATE,
100 
101 	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
102 	DRM_PANTHOR_BO_CREATE,
103 
104 	/**
105 	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
106 	 * mmap to map a GEM object.
107 	 */
108 	DRM_PANTHOR_BO_MMAP_OFFSET,
109 
110 	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
111 	DRM_PANTHOR_GROUP_CREATE,
112 
113 	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
114 	DRM_PANTHOR_GROUP_DESTROY,
115 
116 	/**
117 	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
118 	 * to a specific scheduling group.
119 	 */
120 	DRM_PANTHOR_GROUP_SUBMIT,
121 
122 	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
123 	DRM_PANTHOR_GROUP_GET_STATE,
124 
125 	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
126 	DRM_PANTHOR_TILER_HEAP_CREATE,
127 
128 	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
129 	DRM_PANTHOR_TILER_HEAP_DESTROY,
130 
131 	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
132 	DRM_PANTHOR_BO_SET_LABEL,
133 
134 	/**
135 	 * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
136 	 *
137 	 * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
138 	 * type seen by the process that manipulates the FD, such that a 32-bit process can
139 	 * always map the user MMIO ranges. But this approach doesn't work well for emulators
140 	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
141 	 * code. In that case, the kernel thinks it's a 64-bit process and assumes
142 	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
143 	 * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
144 	 * pgoff_t size.
145 	 */
146 	DRM_PANTHOR_SET_USER_MMIO_OFFSET,
147 
148 	/** @DRM_PANTHOR_BO_SYNC: Sync BO data to/from the device */
149 	DRM_PANTHOR_BO_SYNC,
150 
151 	/**
152 	 * @DRM_PANTHOR_BO_QUERY_INFO: Query information about a BO.
153 	 *
154 	 * This is useful for imported BOs.
155 	 */
156 	DRM_PANTHOR_BO_QUERY_INFO,
157 };
158 
159 /**
160  * DOC: IOCTL arguments
161  */
162 
163 /**
164  * struct drm_panthor_obj_array - Object array.
165  *
166  * This object is used to pass an array of objects whose size is subject to changes in
167  * future versions of the driver. In order to support this mutability, we pass a stride
168  * describing the size of the object as known by userspace.
169  *
170  * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
171  * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
172  * the object size.
173  */
174 struct drm_panthor_obj_array {
175 	/** @stride: Stride of object struct. Used for versioning. */
176 	__u32 stride;
177 
178 	/** @count: Number of objects in the array. */
179 	__u32 count;
180 
181 	/** @array: User pointer to an array of objects. */
182 	__u64 array;
183 };
184 
185 /**
186  * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
187  * @cnt: Number of elements in the array.
188  * @ptr: Pointer to the array to pass to the kernel.
189  *
190  * Macro initializing a drm_panthor_obj_array based on the object size as known
191  * by userspace.
192  */
193 #define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
194 	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
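
/*
 * Illustrative example: initializing an object array with the
 * DRM_PANTHOR_OBJ_ARRAY() macro so the stride matches the object size known
 * by userspace. The "syncs" array is a hypothetical caller-provided array of
 * sync operations.
 *
 *   struct drm_panthor_sync_op syncs[2];
 *
 *   // ... fill syncs[0] and syncs[1] ...
 *
 *   struct drm_panthor_queue_submit qsubmit = {
 *           .queue_index = 0,
 *           .syncs = DRM_PANTHOR_OBJ_ARRAY(2, syncs),
 *   };
 */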
195 
196 /**
197  * enum drm_panthor_sync_op_flags - Synchronization operation flags.
198  */
199 enum drm_panthor_sync_op_flags {
200 	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
201 	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
202 
203 	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
204 	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
205 
206 	/**
207 	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
208 	 * object type.
209 	 */
210 	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
211 
212 	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
213 	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
214 
215 	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
216 	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
217 };
218 
219 /**
220  * struct drm_panthor_sync_op - Synchronization operation.
221  */
222 struct drm_panthor_sync_op {
223 	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
224 	__u32 flags;
225 
226 	/** @handle: Sync handle. */
227 	__u32 handle;
228 
229 	/**
230 	 * @timeline_value: MBZ if
231 	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
232 	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
233 	 */
234 	__u64 timeline_value;
235 };
236 
237 /**
238  * enum drm_panthor_dev_query_type - Query type
239  *
240  * Place new types at the end, don't re-order, don't remove or replace.
241  */
242 enum drm_panthor_dev_query_type {
243 	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
244 	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
245 
246 	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
247 	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
248 
249 	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
250 	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,
251 
252 	/**
253 	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
254 	 */
255 	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
256 };
257 
258 /**
259  * enum drm_panthor_gpu_coherency: Type of GPU coherency
260  */
261 enum drm_panthor_gpu_coherency {
262 	/**
263 	 * @DRM_PANTHOR_GPU_COHERENCY_ACE_LITE: ACE Lite coherency.
264 	 */
265 	DRM_PANTHOR_GPU_COHERENCY_ACE_LITE = 0,
266 
267 	/**
268 	 * @DRM_PANTHOR_GPU_COHERENCY_ACE: ACE coherency.
269 	 */
270 	DRM_PANTHOR_GPU_COHERENCY_ACE = 1,
271 
272 	/**
273 	 * @DRM_PANTHOR_GPU_COHERENCY_NONE: No coherency.
274 	 */
275 	DRM_PANTHOR_GPU_COHERENCY_NONE = 31,
276 };
277 
278 /**
279  * struct drm_panthor_gpu_info - GPU information
280  *
281  * Structure grouping all queryable information relating to the GPU.
282  */
283 struct drm_panthor_gpu_info {
284 	/** @gpu_id: GPU ID. */
285 	__u32 gpu_id;
286 #define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
287 #define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
288 #define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
289 #define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
290 #define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
291 #define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
292 #define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)
293 
294 	/** @gpu_rev: GPU revision. */
295 	__u32 gpu_rev;
296 
297 	/** @csf_id: Command stream frontend ID. */
298 	__u32 csf_id;
299 #define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
300 #define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
301 #define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
302 #define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
303 #define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
304 #define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)
305 
306 	/** @l2_features: L2-cache features. */
307 	__u32 l2_features;
308 
309 	/** @tiler_features: Tiler features. */
310 	__u32 tiler_features;
311 
312 	/** @mem_features: Memory features. */
313 	__u32 mem_features;
314 
315 	/** @mmu_features: MMU features. */
316 	__u32 mmu_features;
317 #define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)
318 
319 	/** @thread_features: Thread features. */
320 	__u32 thread_features;
321 
322 	/** @max_threads: Maximum number of threads. */
323 	__u32 max_threads;
324 
325 	/** @thread_max_workgroup_size: Maximum workgroup size. */
326 	__u32 thread_max_workgroup_size;
327 
328 	/**
329 	 * @thread_max_barrier_size: Maximum number of threads that can wait
330 	 * simultaneously on a barrier.
331 	 */
332 	__u32 thread_max_barrier_size;
333 
334 	/**
335 	 * @coherency_features: Coherency features.
336 	 *
337 	 * Combination of drm_panthor_gpu_coherency flags.
338 	 *
339 	 * Note that these are just the coherency protocols supported by the
340 	 * GPU; the actual coherency in place depends on the SoC
341 	 * integration and is reflected by
342 	 * drm_panthor_gpu_info::selected_coherency.
343 	 */
344 	__u32 coherency_features;
345 
346 	/** @texture_features: Texture features. */
347 	__u32 texture_features[4];
348 
349 	/** @as_present: Bitmask encoding the address spaces exposed by the MMU. */
350 	__u32 as_present;
351 
352 	/**
353 	 * @selected_coherency: Coherency selected for this device.
354 	 *
355 	 * One of drm_panthor_gpu_coherency.
356 	 */
357 	__u32 selected_coherency;
358 
359 	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
360 	__u64 shader_present;
361 
362 	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
363 	__u64 l2_present;
364 
365 	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
366 	__u64 tiler_present;
367 
368 	/** @core_features: Used to discriminate core variants when they exist. */
369 	__u32 core_features;
370 
371 	/** @pad: MBZ. */
372 	__u32 pad;
373 
374 	/** @gpu_features: Bitmask describing supported GPU-wide features */
375 	__u64 gpu_features;
376 };
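
/*
 * Illustrative example: decoding a few drm_panthor_gpu_info fields with the
 * helper macros above. "info" is assumed to have been filled by a successful
 * DRM_PANTHOR_DEV_QUERY_GPU_INFO query.
 *
 *   unsigned int arch_major = DRM_PANTHOR_ARCH_MAJOR(info.gpu_id);
 *   unsigned int va_bits = DRM_PANTHOR_MMU_VA_BITS(info.mmu_features);
 *
 *   // Total GPU virtual address range covered by the MMU.
 *   __u64 gpu_va_range = 1ull << va_bits;
 */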
377 
378 /**
379  * struct drm_panthor_csif_info - Command stream interface information
380  *
381  * Structure grouping all queryable information relating to the command stream interface.
382  */
383 struct drm_panthor_csif_info {
384 	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
385 	__u32 csg_slot_count;
386 
387 	/** @cs_slot_count: Number of command stream slots per group. */
388 	__u32 cs_slot_count;
389 
390 	/** @cs_reg_count: Number of command stream registers. */
391 	__u32 cs_reg_count;
392 
393 	/** @scoreboard_slot_count: Number of scoreboard slots. */
394 	__u32 scoreboard_slot_count;
395 
396 	/**
397 	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
398 	 * the kernel driver to call a userspace command stream.
399 	 *
400 	 * All registers can be used by a userspace command stream, but the
401 	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
402 	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
403 	 */
404 	__u32 unpreserved_cs_reg_count;
405 
406 	/**
407 	 * @pad: Padding field, set to zero.
408 	 */
409 	__u32 pad;
410 };
411 
412 /**
413  * struct drm_panthor_timestamp_info - Timestamp information
414  *
415  * Structure grouping all queryable information relating to the GPU timestamp.
416  */
417 struct drm_panthor_timestamp_info {
418 	/**
419 	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
420 	 * unknown.
421 	 */
422 	__u64 timestamp_frequency;
423 
424 	/** @current_timestamp: The current timestamp. */
425 	__u64 current_timestamp;
426 
427 	/** @timestamp_offset: The offset of the timestamp timer. */
428 	__u64 timestamp_offset;
429 };
430 
431 /**
432  * struct drm_panthor_group_priorities_info - Group priorities information
433  *
434  * Structure grouping all queryable information relating to the allowed group priorities.
435  */
436 struct drm_panthor_group_priorities_info {
437 	/**
438 	 * @allowed_mask: Bitmask of the allowed group priorities.
439 	 *
440 	 * Each bit represents a variant of the enum drm_panthor_group_priority.
441 	 */
442 	__u8 allowed_mask;
443 
444 	/** @pad: Padding fields, MBZ. */
445 	__u8 pad[3];
446 };
447 
448 /**
449  * struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY
450  */
451 struct drm_panthor_dev_query {
452 	/** @type: the query type (see drm_panthor_dev_query_type). */
453 	__u32 type;
454 
455 	/**
456 	 * @size: size of the type being queried.
457 	 *
458 	 * If pointer is NULL, size is updated by the driver to provide the
459 	 * output structure size. If pointer is not NULL, the driver will
460 	 * only copy min(size, actual_structure_size) bytes to the pointer,
461 	 * and update the size accordingly. This allows us to extend query
462 	 * types without breaking userspace.
463 	 */
464 	__u32 size;
465 
466 	/**
467 	 * @pointer: user pointer to a query type struct.
468 	 *
469 	 * Pointer can be NULL, in which case, nothing is copied, but the
470 	 * actual structure size is returned. If not NULL, it must point to
471 	 * a location that's large enough to hold size bytes.
472 	 */
473 	__u64 pointer;
474 };
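
/*
 * Illustrative example of the two-step query described above: a first call
 * with a NULL pointer to discover the output structure size, then a second
 * call to copy the data. "fd" is assumed to be an open panthor render node;
 * error handling is omitted.
 *
 *   struct drm_panthor_gpu_info info = {0};
 *   struct drm_panthor_dev_query query = {
 *           .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *   };
 *
 *   // First call: pointer is NULL, the kernel updates query.size.
 *   ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *
 *   // Second call: the kernel copies min(query.size, actual size) bytes.
 *   query.size = sizeof(info);
 *   query.pointer = (__u64)(uintptr_t)&info;
 *   ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 */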
475 
476 /**
477  * struct drm_panthor_vm_create - Arguments passed to DRM_PANTHOR_IOCTL_VM_CREATE
478  */
479 struct drm_panthor_vm_create {
480 	/** @flags: VM flags, MBZ. */
481 	__u32 flags;
482 
483 	/** @id: Returned VM ID. */
484 	__u32 id;
485 
486 	/**
487 	 * @user_va_range: Size of the VA space reserved for user objects.
488 	 *
489 	 * The kernel will pick the remaining space to map kernel-only objects to the
490 	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
491 	 * ...). If the space left for kernel objects is too small, kernel object
492 	 * allocation will fail further down the road. One can use
493 	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
494 	 * range, and choose a user_va_range that leaves some space for the kernel.
495 	 *
496 	 * If user_va_range is zero, the kernel will pick a sensible value based on
497 	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
498 	 * split should leave enough VA space for userspace processes to support SVM,
499 	 * while still allowing the kernel to map some amount of kernel objects in
500 	 * the kernel VA range). The value chosen by the driver will be returned in
501 	 * @user_va_range.
502 	 *
503 	 * User VA space always starts at 0x0, kernel VA space is always placed after
504 	 * the user VA range.
505 	 */
506 	__u64 user_va_range;
507 };
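
/*
 * Illustrative example: creating a VM and letting the kernel pick the
 * user/kernel VA split. "fd" is assumed to be an open panthor render node;
 * error handling is omitted.
 *
 *   struct drm_panthor_vm_create vm_args = {
 *           .flags = 0,
 *           .user_va_range = 0,   // let the kernel choose a sensible value
 *   };
 *
 *   ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_args);
 *
 *   // vm_args.id now holds the VM ID, and vm_args.user_va_range the
 *   // user VA range actually chosen by the kernel.
 */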
508 
509 /**
510  * struct drm_panthor_vm_destroy - Arguments passed to DRM_PANTHOR_IOCTL_VM_DESTROY
511  */
512 struct drm_panthor_vm_destroy {
513 	/** @id: ID of the VM to destroy. */
514 	__u32 id;
515 
516 	/** @pad: MBZ. */
517 	__u32 pad;
518 };
519 
520 /**
521  * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
522  */
523 enum drm_panthor_vm_bind_op_flags {
524 	/**
525 	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
526 	 *
527 	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
528 	 */
529 	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
530 
531 	/**
532 	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
533 	 *
534 	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
535 	 */
536 	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
537 
538 	/**
539 	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
540 	 *
541 	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
542 	 */
543 	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
544 
545 	/**
546 	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
547 	 */
548 	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),
549 
550 	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
551 	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
552 
553 	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
554 	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
555 
556 	/**
557 	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
558 	 *
559 	 * Just serves as a synchronization point on a VM queue.
560 	 *
561 	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
562 	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
563 	 */
564 	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
565 };
566 
567 /**
568  * struct drm_panthor_vm_bind_op - VM bind operation
569  */
570 struct drm_panthor_vm_bind_op {
571 	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
572 	__u32 flags;
573 
574 	/**
575 	 * @bo_handle: Handle of the buffer object to map.
576 	 * MBZ for unmap or sync-only operations.
577 	 */
578 	__u32 bo_handle;
579 
580 	/**
581 	 * @bo_offset: Buffer object offset.
582 	 * MBZ for unmap or sync-only operations.
583 	 */
584 	__u64 bo_offset;
585 
586 	/**
587 	 * @va: Virtual address to map/unmap.
588 	 * MBZ for sync-only operations.
589 	 */
590 	__u64 va;
591 
592 	/**
593 	 * @size: Size to map/unmap.
594 	 * MBZ for sync-only operations.
595 	 */
596 	__u64 size;
597 
598 	/**
599 	 * @syncs: Array of struct drm_panthor_sync_op synchronization
600 	 * operations.
601 	 *
602 	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
603 	 * the drm_panthor_vm_bind object containing this VM bind operation.
604 	 *
605 	 * This array shall not be empty for sync-only operations.
606 	 */
607 	struct drm_panthor_obj_array syncs;
608 
609 };
610 
611 /**
612  * enum drm_panthor_vm_bind_flags - VM bind flags
613  */
614 enum drm_panthor_vm_bind_flags {
615 	/**
616 	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
617 	 * queue instead of being executed synchronously.
618 	 */
619 	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
620 };
621 
622 /**
623  * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
624  */
625 struct drm_panthor_vm_bind {
626 	/** @vm_id: VM targeted by the bind request. */
627 	__u32 vm_id;
628 
629 	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
630 	__u32 flags;
631 
632 	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
633 	struct drm_panthor_obj_array ops;
634 };
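
/*
 * Illustrative example: synchronously mapping a BO into a VM. "fd", "vm_id",
 * "bo_handle", "bo_size" and "gpu_va" are assumed to come from earlier calls;
 * error handling is omitted.
 *
 *   struct drm_panthor_vm_bind_op op = {
 *           .flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
 *           .bo_handle = bo_handle,
 *           .bo_offset = 0,
 *           .va = gpu_va,
 *           .size = bo_size,
 *           // .syncs left zeroed: no sync operations on a synchronous bind.
 *   };
 *   struct drm_panthor_vm_bind bind = {
 *           .vm_id = vm_id,
 *           .flags = 0,   // DRM_PANTHOR_VM_BIND_ASYNC not set: synchronous
 *           .ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
 *   };
 *
 *   ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
 */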
635 
636 /**
637  * enum drm_panthor_vm_state - VM states.
638  */
639 enum drm_panthor_vm_state {
640 	/**
641 	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
642 	 *
643 	 * New VM operations will be accepted on this VM.
644 	 */
645 	DRM_PANTHOR_VM_STATE_USABLE,
646 
647 	/**
648 	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
649 	 *
650 	 * Something put the VM in an unusable state (like an asynchronous
651 	 * VM_BIND request failing for any reason).
652 	 *
653 	 * Once the VM is in this state, all new MAP operations will be
654 	 * rejected, and any GPU job targeting this VM will fail.
655 	 * UNMAP operations are still accepted.
656 	 *
657 	 * The only way to recover from an unusable VM is to create a new
658 	 * VM, and destroy the old one.
659 	 */
660 	DRM_PANTHOR_VM_STATE_UNUSABLE,
661 };
662 
663 /**
664  * struct drm_panthor_vm_get_state - Get VM state.
665  */
666 struct drm_panthor_vm_get_state {
667 	/** @vm_id: VM targeted by the get_state request. */
668 	__u32 vm_id;
669 
670 	/**
671 	 * @state: state returned by the driver.
672 	 *
673 	 * Must be one of the enum drm_panthor_vm_state values.
674 	 */
675 	__u32 state;
676 };
677 
678 /**
679  * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
680  */
681 enum drm_panthor_bo_flags {
682 	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
683 	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
684 
685 	/**
686 	 * @DRM_PANTHOR_BO_WB_MMAP: Force "Write-Back Cacheable" CPU mapping.
687 	 *
688 	 * CPU map the buffer object in userspace by forcing the "Write-Back
689 	 * Cacheable" cacheability attribute. The mapping otherwise uses the
690 	 * "Non-Cacheable" attribute if the GPU is not IO coherent.
691 	 */
692 	DRM_PANTHOR_BO_WB_MMAP = (1 << 1),
693 };
694 
695 /**
696  * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
697  */
698 struct drm_panthor_bo_create {
699 	/**
700 	 * @size: Requested size for the object
701 	 *
702 	 * The (page-aligned) allocated size for the object will be returned.
703 	 */
704 	__u64 size;
705 
706 	/**
707 	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
708 	 */
709 	__u32 flags;
710 
711 	/**
712 	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
713 	 *
714 	 * If not zero, the field must refer to a valid VM ID, and implies that:
715 	 *  - the buffer object will only ever be bound to that VM
716 	 *  - the buffer object cannot be exported as a PRIME fd
717 	 */
718 	__u32 exclusive_vm_id;
719 
720 	/**
721 	 * @handle: Returned handle for the object.
722 	 *
723 	 * Object handles are nonzero.
724 	 */
725 	__u32 handle;
726 
727 	/** @pad: MBZ. */
728 	__u32 pad;
729 };
730 
731 /**
732  * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
733  */
734 struct drm_panthor_bo_mmap_offset {
735 	/** @handle: Handle of the object we want an mmap offset for. */
736 	__u32 handle;
737 
738 	/** @pad: MBZ. */
739 	__u32 pad;
740 
741 	/** @offset: The fake offset to use for subsequent mmap calls. */
742 	__u64 offset;
743 };
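
/*
 * Illustrative example: creating a 64 KiB BO and CPU-mapping it through its
 * fake mmap offset. "fd" is assumed to be an open panthor render node; error
 * handling is omitted.
 *
 *   struct drm_panthor_bo_create create = {
 *           .size = 65536,
 *           .flags = 0,
 *           .exclusive_vm_id = 0,   // BO can be bound to any VM and exported
 *   };
 *   ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &create);
 *
 *   struct drm_panthor_bo_mmap_offset mmap_off = {
 *           .handle = create.handle,
 *   };
 *   ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &mmap_off);
 *
 *   void *cpu_ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, mmap_off.offset);
 */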
744 
745 /**
746  * struct drm_panthor_queue_create - Queue creation arguments.
747  */
748 struct drm_panthor_queue_create {
749 	/**
750 	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
751 	 * 15 being the highest priority.
752 	 */
753 	__u8 priority;
754 
755 	/** @pad: Padding fields, MBZ. */
756 	__u8 pad[3];
757 
758 	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
759 	__u32 ringbuf_size;
760 };
761 
762 /**
763  * enum drm_panthor_group_priority - Scheduling group priority
764  */
765 enum drm_panthor_group_priority {
766 	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
767 	PANTHOR_GROUP_PRIORITY_LOW = 0,
768 
769 	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
770 	PANTHOR_GROUP_PRIORITY_MEDIUM,
771 
772 	/**
773 	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
774 	 *
775 	 * Requires CAP_SYS_NICE or DRM_MASTER.
776 	 */
777 	PANTHOR_GROUP_PRIORITY_HIGH,
778 
779 	/**
780 	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
781 	 *
782 	 * Requires CAP_SYS_NICE or DRM_MASTER.
783 	 */
784 	PANTHOR_GROUP_PRIORITY_REALTIME,
785 };
786 
787 /**
788  * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
789  */
790 struct drm_panthor_group_create {
791 	/** @queues: Array of drm_panthor_queue_create elements. */
792 	struct drm_panthor_obj_array queues;
793 
794 	/**
795 	 * @max_compute_cores: Maximum number of cores that can be used by compute
796 	 * jobs across CS queues bound to this group.
797 	 *
798 	 * Must be less than or equal to the number of bits set in @compute_core_mask.
799 	 */
800 	__u8 max_compute_cores;
801 
802 	/**
803 	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
804 	 * jobs across CS queues bound to this group.
805 	 *
806 	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
807 	 */
808 	__u8 max_fragment_cores;
809 
810 	/**
811 	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
812 	 * across CS queues bound to this group.
813 	 *
814 	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
815 	 */
816 	__u8 max_tiler_cores;
817 
818 	/** @priority: Group priority (see enum drm_panthor_group_priority). */
819 	__u8 priority;
820 
821 	/** @pad: Padding field, MBZ. */
822 	__u32 pad;
823 
824 	/**
825 	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
826 	 *
827 	 * This field must have at least @max_compute_cores bits set.
828 	 *
829 	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
830 	 */
831 	__u64 compute_core_mask;
832 
833 	/**
834 	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
835 	 *
836 	 * This field must have at least @max_fragment_cores bits set.
837 	 *
838 	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
839 	 */
840 	__u64 fragment_core_mask;
841 
842 	/**
843 	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
844 	 *
845 	 * This field must have at least @max_tiler_cores bits set.
846 	 *
847 	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
848 	 */
849 	__u64 tiler_core_mask;
850 
851 	/**
852 	 * @vm_id: VM ID to bind this group to.
853 	 *
854 	 * All submission to queues bound to this group will use this VM.
855 	 */
856 	__u32 vm_id;
857 
858 	/**
859 	 * @group_handle: Returned group handle. Passed back when submitting jobs or
860 	 * destroying a group.
861 	 */
862 	__u32 group_handle;
863 };
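
/*
 * Illustrative example: creating a group with a single queue that may use all
 * shader cores. "fd", "vm_id" and "gpu_info" (a previously queried
 * drm_panthor_gpu_info) are assumptions; error handling is omitted.
 *
 *   struct drm_panthor_queue_create queue = {
 *           .priority = 0,
 *           .ringbuf_size = 65536,
 *   };
 *   struct drm_panthor_group_create group = {
 *           .queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *           .max_compute_cores = __builtin_popcountll(gpu_info.shader_present),
 *           .max_fragment_cores = __builtin_popcountll(gpu_info.shader_present),
 *           .max_tiler_cores = __builtin_popcountll(gpu_info.tiler_present),
 *           .priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *           .compute_core_mask = gpu_info.shader_present,
 *           .fragment_core_mask = gpu_info.shader_present,
 *           .tiler_core_mask = gpu_info.tiler_present,
 *           .vm_id = vm_id,
 *   };
 *
 *   ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group);
 *   // group.group_handle now identifies the new group.
 */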
864 
865 /**
866  * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
867  */
868 struct drm_panthor_group_destroy {
869 	/** @group_handle: Group to destroy */
870 	__u32 group_handle;
871 
872 	/** @pad: Padding field, MBZ. */
873 	__u32 pad;
874 };
875 
876 /**
877  * struct drm_panthor_queue_submit - Job submission arguments.
878  *
879  * This describes the userspace command stream to call from the kernel
880  * command stream ring-buffer. Queue submission is always part of a group
881  * submission, taking one or more jobs to submit to the underlying queues.
882  */
883 struct drm_panthor_queue_submit {
884 	/** @queue_index: Index of the queue inside a group. */
885 	__u32 queue_index;
886 
887 	/**
888 	 * @stream_size: Size of the command stream to execute.
889 	 *
890 	 * Must be 64-bit/8-byte aligned (the size of a CS instruction)
891 	 *
892 	 * Can be zero if stream_addr is zero too.
893 	 *
894 	 * When the stream size is zero, the queue submit serves as a
895 	 * synchronization point.
896 	 */
897 	__u32 stream_size;
898 
899 	/**
900 	 * @stream_addr: GPU address of the command stream to execute.
901 	 *
902 	 * Must be 64-byte aligned.
903 	 *
904 	 * Can be zero if stream_size is zero too.
905 	 */
906 	__u64 stream_addr;
907 
908 	/**
909 	 * @latest_flush: FLUSH_ID read at the time the stream was built.
910 	 *
911 	 * This allows cache flush elimination for the automatic
912 	 * flush+invalidate(all) done at submission time, which is needed to
913 	 * ensure the GPU doesn't get garbage when reading the indirect command
914 	 * stream buffers. If you want the cache flush to happen
915 	 * unconditionally, pass a zero here.
916 	 *
917 	 * Ignored when stream_size is zero.
918 	 */
919 	__u32 latest_flush;
920 
921 	/** @pad: MBZ. */
922 	__u32 pad;
923 
924 	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
925 	struct drm_panthor_obj_array syncs;
926 };
927 
928 /**
929  * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
930  */
931 struct drm_panthor_group_submit {
932 	/** @group_handle: Handle of the group to queue jobs to. */
933 	__u32 group_handle;
934 
935 	/** @pad: MBZ. */
936 	__u32 pad;
937 
938 	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
939 	struct drm_panthor_obj_array queue_submits;
940 };
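
/*
 * Illustrative example: submitting one job to queue 0 of a group and
 * signaling a binary syncobj on completion. "fd", "group_handle",
 * "cs_gpu_va" (64-byte aligned), "cs_size" (8-byte aligned), "flush_id" and
 * "syncobj_handle" are assumed to come from earlier calls; error handling is
 * omitted.
 *
 *   struct drm_panthor_sync_op signal = {
 *           .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *                    DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *           .handle = syncobj_handle,
 *   };
 *   struct drm_panthor_queue_submit qsubmit = {
 *           .queue_index = 0,
 *           .stream_size = cs_size,
 *           .stream_addr = cs_gpu_va,
 *           .latest_flush = flush_id,   // or 0 to force a full flush
 *           .syncs = DRM_PANTHOR_OBJ_ARRAY(1, &signal),
 *   };
 *   struct drm_panthor_group_submit submit = {
 *           .group_handle = group_handle,
 *           .queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *   };
 *
 *   ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit);
 */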
941 
942 /**
943  * enum drm_panthor_group_state_flags - Group state flags
944  */
945 enum drm_panthor_group_state_flags {
946 	/**
947 	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
948 	 *
949 	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
950 	 */
951 	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
952 
953 	/**
954 	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
955 	 *
956 	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
957 	 */
958 	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
959 
960 	/**
961 	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
962 	 * groups.
963 	 *
964 	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
965 	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
966 	 */
967 	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
968 };
969 
970 /**
971  * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
972  *
973  * Used to query the state of a group and decide whether a new group should be created to
974  * replace it.
975  */
976 struct drm_panthor_group_get_state {
977 	/** @group_handle: Handle of the group to query state on */
978 	__u32 group_handle;
979 
980 	/**
981 	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
982 	 * group state.
983 	 */
984 	__u32 state;
985 
986 	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
987 	__u32 fatal_queues;
988 
989 	/** @pad: MBZ */
990 	__u32 pad;
991 };
992 
993 /**
994  * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
995  */
996 struct drm_panthor_tiler_heap_create {
997 	/** @vm_id: VM ID the tiler heap should be mapped to */
998 	__u32 vm_id;
999 
1000 	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
1001 	__u32 initial_chunk_count;
1002 
1003 	/**
1004 	 * @chunk_size: Chunk size.
1005 	 *
1006 	 * Must be page-aligned and lie in the [128k:8M] range.
1007 	 */
1008 	__u32 chunk_size;
1009 
1010 	/**
1011 	 * @max_chunks: Maximum number of chunks that can be allocated.
1012 	 *
1013 	 * Must be at least @initial_chunk_count.
1014 	 */
1015 	__u32 max_chunks;
1016 
1017 	/**
1018 	 * @target_in_flight: Maximum number of in-flight render passes.
1019 	 *
1020 	 * If the heap has more than @target_in_flight render passes in flight,
1021 	 * the FW will wait for render passes to finish before queuing new tiler jobs.
1022 	 */
1023 	__u32 target_in_flight;
1024 
1025 	/** @handle: Returned heap handle. Passed back to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY. */
1026 	__u32 handle;
1027 
1028 	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the heap context. */
1029 	__u64 tiler_heap_ctx_gpu_va;
1030 
1031 	/**
1032 	 * @first_heap_chunk_gpu_va: First heap chunk.
1033 	 *
1034 	 * The tiler heap is made of heap chunks forming a singly-linked list. This
1035 	 * is the first element in the list.
1036 	 */
1037 	__u64 first_heap_chunk_gpu_va;
1038 };
1039 
1040 /**
1041  * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
1042  */
1043 struct drm_panthor_tiler_heap_destroy {
1044 	/**
1045 	 * @handle: Handle of the tiler heap to destroy.
1046 	 *
1047 	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
1048 	 */
1049 	__u32 handle;
1050 
1051 	/** @pad: Padding field, MBZ. */
1052 	__u32 pad;
1053 };
1054 
1055 /**
1056  * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
1057  */
1058 struct drm_panthor_bo_set_label {
1059 	/** @handle: Handle of the buffer object to label. */
1060 	__u32 handle;
1061 
1062 	/** @pad: MBZ. */
1063 	__u32 pad;
1064 
1065 	/**
1066 	 * @label: User pointer to a NUL-terminated string
1067 	 *
1068 	 * Length cannot be greater than 4096
1069 	 */
1070 	__u64 label;
1071 };
1072 
1073 /**
1074  * struct drm_panthor_set_user_mmio_offset - Arguments passed to
1075  * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
1076  *
1077  * This ioctl is only really useful if you want to support userspace
1078  * CPU emulation environments where the size of an unsigned long differs
1079  * between the host and the guest architectures.
1080  */
1081 struct drm_panthor_set_user_mmio_offset {
1082 	/**
1083 	 * @offset: User MMIO offset to use.
1084 	 *
1085 	 * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
1086 	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
1087 	 *
1088 	 * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
1089 	 * OFFSET_64BIT based on the size of an unsigned long) unless you
1090 	 * have a very good reason to overrule this decision.
1091 	 */
1092 	__u64 offset;
1093 };
1094 
1095 /**
1096  * enum drm_panthor_bo_sync_op_type - BO sync type
1097  */
1098 enum drm_panthor_bo_sync_op_type {
1099 	/** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH: Flush CPU caches. */
1100 	DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH = 0,
1101 
1102 	/** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE: Flush and invalidate CPU caches. */
1103 	DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE = 1,
1104 };
1105 
1106 /**
1107  * struct drm_panthor_bo_sync_op - BO map sync op
1108  */
1109 struct drm_panthor_bo_sync_op {
1110 	/** @handle: Handle of the buffer object to sync. */
1111 	__u32 handle;
1112 
1113 	/** @type: Type of operation. */
1114 	__u32 type;
1115 
1116 	/**
1117 	 * @offset: Offset into the BO at which the sync range starts.
1118 	 *
1119 	 * This will be rounded down to the nearest cache line as needed.
1120 	 */
1121 	__u64 offset;
1122 
1123 	/**
1124 	 * @size: Size of the range to sync
1125 	 *
1126 	 * @size + @offset will be rounded up to the nearest cache line as
1127 	 * needed.
1128 	 */
1129 	__u64 size;
1130 };
1131 
1132 /**
1133  * struct drm_panthor_bo_sync - BO map sync request
1134  */
1135 struct drm_panthor_bo_sync {
1136 	/**
1137 	 * @ops: Array of struct drm_panthor_bo_sync_op sync operations.
1138 	 */
1139 	struct drm_panthor_obj_array ops;
1140 };
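
/*
 * Illustrative example: flushing a CPU-written range of a BO before handing
 * it to the GPU. "fd", "bo_handle" and "len" are assumptions; error handling
 * is omitted.
 *
 *   struct drm_panthor_bo_sync_op op = {
 *           .handle = bo_handle,
 *           .type = DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH,
 *           .offset = 0,
 *           .size = len,
 *   };
 *   struct drm_panthor_bo_sync sync = {
 *           .ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
 *   };
 *
 *   ioctl(fd, DRM_IOCTL_PANTHOR_BO_SYNC, &sync);
 */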
1141 
1142 /**
1143  * enum drm_panthor_bo_extra_flags - Set of flags returned on a BO_QUERY_INFO request
1144  *
1145  * These flags reflect BO properties that do not come directly from the flags
1146  * passed at creation time, or information on BOs that were imported from other drivers.
1147  */
1148 enum drm_panthor_bo_extra_flags {
1149 	/**
1150 	 * @DRM_PANTHOR_BO_IS_IMPORTED: BO has been imported from an external driver.
1151 	 *
1152 	 * Note that imported dma-buf handles are not flagged as imported if they
1153 	 * were exported by panthor; only buffers coming from other drivers
1154 	 * (dma heaps, other GPUs, display controllers, V4L, ...) are.
1155 	 *
1156 	 * It's also important to note that all imported BOs are mapped cached and can't
1157 	 * be considered IO-coherent even if the GPU is. This means they require explicit
1158 	 * syncs that must go through the DRM_PANTHOR_BO_SYNC ioctl (userland cache
1159 	 * maintenance is not allowed in that case, because extra operations might be
1160 	 * needed to make changes visible to the CPU/device, like buffer migration when the
1161 	 * exporter is a GPU with its own VRAM).
1162 	 */
1163 	DRM_PANTHOR_BO_IS_IMPORTED = (1 << 0),
1164 };
1165 
1166 /**
1167  * struct drm_panthor_bo_query_info - Query BO info
1168  */
1169 struct drm_panthor_bo_query_info {
1170 	/** @handle: Handle of the buffer object to query flags on. */
1171 	__u32 handle;
1172 
1173 	/**
1174 	 * @extra_flags: Combination of enum drm_panthor_bo_extra_flags flags.
1175 	 */
1176 	__u32 extra_flags;
1177 
1178 	/**
1179 	 * @create_flags: Flags passed at creation time.
1180 	 *
1181 	 * Combination of enum drm_panthor_bo_flags flags.
1182 	 * Will be zero if the buffer comes from a different driver.
1183 	 */
1184 	__u32 create_flags;
1185 
1186 	/** @pad: Will be zero on return. */
1187 	__u32 pad;
1188 };
1189 
1190 /**
1191  * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
1192  * @__access: Access type. Must be R, W or RW.
1193  * @__id: One of the DRM_PANTHOR_xxx id.
1194  * @__type: Suffix of the type being passed to the IOCTL.
1195  *
1196  * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
1197  * values instead.
1198  *
1199  * Return: An IOCTL number to be passed to ioctl() from userspace.
1200  */
1201 #define DRM_IOCTL_PANTHOR(__access, __id, __type) \
1202 	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
1203 			   struct drm_panthor_ ## __type)
1204 
1205 enum {
1206 	DRM_IOCTL_PANTHOR_DEV_QUERY =
1207 		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
1208 	DRM_IOCTL_PANTHOR_VM_CREATE =
1209 		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
1210 	DRM_IOCTL_PANTHOR_VM_DESTROY =
1211 		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
1212 	DRM_IOCTL_PANTHOR_VM_BIND =
1213 		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
1214 	DRM_IOCTL_PANTHOR_VM_GET_STATE =
1215 		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
1216 	DRM_IOCTL_PANTHOR_BO_CREATE =
1217 		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
1218 	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
1219 		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
1220 	DRM_IOCTL_PANTHOR_GROUP_CREATE =
1221 		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
1222 	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
1223 		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
1224 	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
1225 		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
1226 	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
1227 		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
1228 	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
1229 		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
1230 	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
1231 		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
1232 	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
1233 		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
1234 	DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
1235 		DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
1236 	DRM_IOCTL_PANTHOR_BO_SYNC =
1237 		DRM_IOCTL_PANTHOR(WR, BO_SYNC, bo_sync),
1238 	DRM_IOCTL_PANTHOR_BO_QUERY_INFO =
1239 		DRM_IOCTL_PANTHOR(WR, BO_QUERY_INFO, bo_query_info),
1240 };
1241 
1242 #if defined(__cplusplus)
1243 }
1244 #endif
1245 
1246 #endif /* _PANTHOR_DRM_H_ */
1247