/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on a 64-bit/8-byte boundary. If the object
 *   is not naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed to by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to resort to trial and error to know
 *   which flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly; use the DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * The pgoffset passed to mmap2() is an unsigned long, which forces us to use
 * a different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)

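/*
 * Example: reading LATEST_FLUSH_ID from userspace. This is an illustrative
 * sketch, not part of the UAPI: fd is assumed to be an open Panthor DRM file
 * descriptor, page_size to come from sysconf(_SC_PAGESIZE), and the build to
 * use a 64-bit off_t (e.g. _FILE_OFFSET_BITS=64). Error handling is omitted.
 *
 *	const volatile __u32 *latest_flush_id =
 *		mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *		     DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 * The value read through latest_flush_id at stream-build time is what
 * drm_panthor_queue_submit::latest_flush expects.
 */
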
/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,
};

/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to change in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }

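/*
 * Example: initializing an object array with the macro above. An
 * illustrative sketch, not part of the UAPI; ops is a hypothetical local
 * array.
 *
 *	struct drm_panthor_vm_bind_op ops[2];
 *	struct drm_panthor_vm_bind bind_args = {
 *		.ops = DRM_PANTHOR_OBJ_ARRAY(2, ops),
 *	};
 *
 * The stride is derived from sizeof(ops[0]), which tells the kernel which
 * version of the object layout userspace was built against.
 */
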
/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};

/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,

	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,

	/**
	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
	 */
	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/** @coherency_features: Coherency features. */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the address spaces exposed by the MMU. */
	__u32 as_present;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;
};

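/*
 * Example: decoding the packed ID/feature fields with the macros above. An
 * illustrative sketch; info is assumed to have been filled by a
 * DRM_PANTHOR_DEV_QUERY_GPU_INFO query.
 *
 *	struct drm_panthor_gpu_info info;
 *	unsigned int arch_major, product_major, va_bits;
 *
 *	arch_major = DRM_PANTHOR_ARCH_MAJOR(info.gpu_id);
 *	product_major = DRM_PANTHOR_PRODUCT_MAJOR(info.gpu_id);
 *	va_bits = DRM_PANTHOR_MMU_VA_BITS(info.mmu_features);
 */
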
/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_timestamp_info - Timestamp information
 *
 * Structure grouping all queryable information relating to the GPU timestamp.
 */
struct drm_panthor_timestamp_info {
	/**
	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
	 * unknown.
	 */
	__u64 timestamp_frequency;

	/** @current_timestamp: The current timestamp. */
	__u64 current_timestamp;

	/** @timestamp_offset: The offset of the timestamp timer. */
	__u64 timestamp_offset;
};

/**
 * struct drm_panthor_group_priorities_info - Group priorities information
 *
 * Structure grouping all queryable information relating to the allowed group priorities.
 */
struct drm_panthor_group_priorities_info {
	/**
	 * @allowed_mask: Bitmask of the allowed group priorities.
	 *
	 * Each bit represents a variant of the enum drm_panthor_group_priority.
	 */
	__u8 allowed_mask;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
};

/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};

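/*
 * Example: the two-step query pattern described above. An illustrative
 * sketch; fd is an open Panthor DRM file descriptor, and error handling and
 * includes are omitted.
 *
 *	struct drm_panthor_gpu_info info = {};
 *	struct drm_panthor_dev_query query = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *	};
 *
 * With a NULL pointer, the driver just reports the structure size:
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *
 * Then fetch min(query.size, sizeof(info)) bytes:
 *
 *	query.size = sizeof(info);
 *	query.pointer = (__u64)(uintptr_t)&info;
 *	ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *
 * In practice, the first call is only needed when userspace wants to know
 * how much the kernel structure has grown; passing sizeof(info) directly is
 * also valid.
 */
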
/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space to the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};

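/*
 * Example: creating a VM and letting the kernel pick the user/kernel VA
 * split. An illustrative sketch; fd is an open Panthor DRM file descriptor
 * and error handling is omitted.
 *
 *	struct drm_panthor_vm_create vm_args = {
 *		.user_va_range = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_args);
 *
 * On return, vm_args.id identifies the VM and vm_args.user_va_range holds
 * the range the kernel actually reserved for user objects.
 */
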
/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};

/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;

	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;

	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};

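/*
 * Example: synchronously mapping a whole buffer object. An illustrative
 * sketch; vm_id/bo_handle/bo_size come from earlier VM_CREATE/BO_CREATE
 * calls, and 0x800000 is an arbitrary page-aligned GPU VA inside the user
 * VA range, not a recommended address.
 *
 *	struct drm_panthor_vm_bind_op op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
 *		.bo_handle = bo_handle,
 *		.bo_offset = 0,
 *		.va = 0x800000,
 *		.size = bo_size,
 *	};
 *	struct drm_panthor_vm_bind bind_args = {
 *		.vm_id = vm_id,
 *		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind_args);
 *
 * Since DRM_PANTHOR_VM_BIND_ASYNC is not set, op.syncs is left empty, as
 * required.
 */
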
/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - it cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};

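/*
 * Example: allocating a CPU-mappable buffer object and mapping it. An
 * illustrative sketch; fd is an open Panthor DRM file descriptor and error
 * handling is omitted.
 *
 *	struct drm_panthor_bo_create bo_args = {
 *		.size = 65536,
 *	};
 *	struct drm_panthor_bo_mmap_offset mmap_off_args = {};
 *	void *cpu_ptr;
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &bo_args);
 *	mmap_off_args.handle = bo_args.handle;
 *	ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &mmap_off_args);
 *	cpu_ptr = mmap(NULL, bo_args.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, mmap_off_args.offset);
 */
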
/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,

	/**
	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_REALTIME,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submission to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};

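/*
 * Example: creating a group with one queue, using every core reported by
 * the GPU_INFO query. An illustrative sketch; info/vm_id come from earlier
 * ioctls, and __builtin_popcountll() is a GCC/Clang builtin used here for
 * brevity.
 *
 *	struct drm_panthor_queue_create queue = {
 *		.priority = 0,
 *		.ringbuf_size = 65536,
 *	};
 *	struct drm_panthor_group_create group_args = {
 *		.queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *		.max_compute_cores = __builtin_popcountll(info.shader_present),
 *		.max_fragment_cores = __builtin_popcountll(info.shader_present),
 *		.max_tiler_cores = __builtin_popcountll(info.tiler_present),
 *		.priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *		.compute_core_mask = info.shader_present,
 *		.fragment_core_mask = info.shader_present,
 *		.tiler_core_mask = info.tiler_present,
 *		.vm_id = vm_id,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group_args);
 *
 * On return, group_args.group_handle identifies the group.
 */
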
/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy. */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This describes the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction).
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};

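/*
 * Example: submitting one command stream to queue 0 and signaling a syncobj
 * on completion. An illustrative sketch; group_handle, the stream GPU
 * VA/size, syncobj_handle and the mapped latest_flush_id pointer are
 * assumed to be set up by the caller.
 *
 *	struct drm_panthor_sync_op sync = {
 *		.flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *			 DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_panthor_queue_submit qsubmit = {
 *		.queue_index = 0,
 *		.stream_size = stream_size,
 *		.stream_addr = stream_gpu_va,
 *		.latest_flush = *latest_flush_id,
 *		.syncs = DRM_PANTHOR_OBJ_ARRAY(1, &sync),
 *	};
 *	struct drm_panthor_group_submit submit_args = {
 *		.group_handle = group_handle,
 *		.queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit_args);
 */
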
/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
	 * groups.
	 *
	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
	 */
	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on. */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to. */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight render passes in flight,
	 * the FW will wait for render passes to finish before queuing new
	 * tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the heap context. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly linked list. This
	 * is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};

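/*
 * Example: creating a tiler heap with 2 MiB chunks. An illustrative sketch;
 * vm_id comes from a prior VM_CREATE, and the chunk counts and
 * target_in_flight value are arbitrary illustrations within the documented
 * limits.
 *
 *	struct drm_panthor_tiler_heap_create heap_args = {
 *		.vm_id = vm_id,
 *		.initial_chunk_count = 1,
 *		.chunk_size = 2 * 1024 * 1024,
 *		.max_chunks = 64,
 *		.target_in_flight = 65535,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap_args);
 *
 * On return, heap_args.handle, heap_args.tiler_heap_ctx_gpu_va and
 * heap_args.first_heap_chunk_gpu_va are filled by the driver.
 */
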
/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx IDs.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

enum {
	DRM_IOCTL_PANTHOR_DEV_QUERY =
		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
	DRM_IOCTL_PANTHOR_VM_CREATE =
		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
	DRM_IOCTL_PANTHOR_VM_DESTROY =
		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
	DRM_IOCTL_PANTHOR_VM_BIND =
		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
	DRM_IOCTL_PANTHOR_VM_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
	DRM_IOCTL_PANTHOR_BO_CREATE =
		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
	DRM_IOCTL_PANTHOR_GROUP_CREATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
};

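/*
 * Example: invoking one of the IOCTLs defined above. An illustrative
 * sketch; real userspace drivers typically go through libdrm's drmIoctl()
 * wrapper, which restarts the call on EINTR/EAGAIN.
 *
 *	struct drm_panthor_vm_get_state state_args = {
 *		.vm_id = vm_id,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PANTHOR_VM_GET_STATE, &state_args))
 *		return -errno;
 *
 * If state_args.state is DRM_PANTHOR_VM_STATE_UNUSABLE, the VM should be
 * destroyed and recreated, as described in enum drm_panthor_vm_state.
 */
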
#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */