xref: /linux/include/uapi/drm/panfrost_drm.h (revision c0d6f52f9b62479d61f8cd4faf9fb2f8bce6e301)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2014-2018 Broadcom
4  * Copyright © 2019 Collabora ltd.
5  */
6 #ifndef _PANFROST_DRM_H_
7 #define _PANFROST_DRM_H_
8 
9 #include "drm.h"
10 
11 #if defined(__cplusplus)
12 extern "C" {
13 #endif
14 
/* Per-driver ioctl numbers, used as offsets from DRM_COMMAND_BASE below. */
#define DRM_PANFROST_SUBMIT			0x00
#define DRM_PANFROST_WAIT_BO			0x01
#define DRM_PANFROST_CREATE_BO			0x02
#define DRM_PANFROST_MMAP_BO			0x03
#define DRM_PANFROST_GET_PARAM			0x04
#define DRM_PANFROST_GET_BO_OFFSET		0x05
#define DRM_PANFROST_PERFCNT_ENABLE		0x06
#define DRM_PANFROST_PERFCNT_DUMP		0x07
#define DRM_PANFROST_MADVISE			0x08
#define DRM_PANFROST_SET_LABEL_BO		0x09
#define DRM_PANFROST_JM_CTX_CREATE		0x0a
#define DRM_PANFROST_JM_CTX_DESTROY		0x0b
#define DRM_PANFROST_SYNC_BO			0x0c
#define DRM_PANFROST_QUERY_BO_INFO		0x0d
29 
/* Full ioctl request codes for the stable ioctls, built from the numbers above. */
#define DRM_IOCTL_PANFROST_SUBMIT		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
#define DRM_IOCTL_PANFROST_CREATE_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_BO, struct drm_panfrost_create_bo)
#define DRM_IOCTL_PANFROST_MMAP_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
#define DRM_IOCTL_PANFROST_GET_PARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
#define DRM_IOCTL_PANFROST_SET_LABEL_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
#define DRM_IOCTL_PANFROST_JM_CTX_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create)
#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy)
#define DRM_IOCTL_PANFROST_SYNC_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SYNC_BO, struct drm_panfrost_sync_bo)
#define DRM_IOCTL_PANFROST_QUERY_BO_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_QUERY_BO_INFO, struct drm_panfrost_query_bo_info)
42 
/*
 * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
 * param is set to true.
 * All these ioctl(s) are subject to deprecation, so please don't rely on
 * them for anything but debugging purpose.
 */
#define DRM_IOCTL_PANFROST_PERFCNT_ENABLE	DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_ENABLE, struct drm_panfrost_perfcnt_enable)
#define DRM_IOCTL_PANFROST_PERFCNT_DUMP		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_DUMP, struct drm_panfrost_perfcnt_dump)
51 
/* Valid bits for drm_panfrost_submit::requirements. */
#define PANFROST_JD_REQ_FS (1 << 0)
#define PANFROST_JD_REQ_CYCLE_COUNT (1 << 1)
/**
 * struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * This asks the kernel to have the GPU execute a render command list.
 */
struct drm_panfrost_submit {
	/**
	 * @jc: Address to GPU mapping of job descriptor
	 */
	__u64 jc;
	/**
	 * @in_syncs: An optional array of sync objects to wait on
	 * before starting this job.
	 */
	__u64 in_syncs;
	/**
	 * @in_sync_count: Number of sync objects to wait on before
	 * starting this job.
	 */
	__u32 in_sync_count;
	/**
	 * @out_sync: An optional sync object to place the completion fence in.
	 */
	__u32 out_sync;
	/**
	 * @bo_handles: Pointer to a u32 array of the BOs that are
	 * referenced by the job.
	 */
	__u64 bo_handles;
	/**
	 * @bo_handle_count: Number of BO handles passed in (size is
	 * that times 4).
	 */
	__u32 bo_handle_count;
	/**
	 * @requirements: A combination of PANFROST_JD_REQ_*
	 */
	__u32 requirements;
	/**
	 * @jm_ctx_handle: JM context handle. Zero if you want to use the
	 * default context.
	 */
	__u32 jm_ctx_handle;
	/**
	 * @pad: Padding field. Must be zero.
	 */
	__u32 pad;
};
103 
/**
 * struct drm_panfrost_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_PANFROST_SUBMIT on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_panfrost_wait_bo {
	/**
	 * @handle: Handle for the object to wait for.
	 */
	__u32 handle;
	/**
	 * @pad: Padding, must be zero-filled.
	 */
	__u32 pad;
	/**
	 * @timeout_ns: absolute number of nanoseconds to wait.
	 */
	__s64 timeout_ns;
};
126 
/* Valid flags to pass to drm_panfrost_create_bo.
 * PANFROST_BO_WB_MMAP can't be set if PANFROST_BO_HEAP is.
 */
#define PANFROST_BO_NOEXEC	1
#define PANFROST_BO_HEAP	2
#define PANFROST_BO_WB_MMAP	4
133 
/**
 * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
 *
 * The flags argument is a bit mask of PANFROST_BO_* flags.
 */
struct drm_panfrost_create_bo {
	/**
	 * @size: size of shmem/BO area to create (bytes)
	 */
	__u32 size;
	/**
	 * @flags: see PANFROST_BO_* flags
	 */
	__u32 flags;
	/**
	 * @handle: Returned GEM handle for the BO.
	 */
	__u32 handle;
	/**
	 * @pad: Padding, must be zero-filled.
	 */
	__u32 pad;
	/**
	 * @offset: Returned offset for the BO in the GPU address space.
	 * This offset is private to the DRM fd and is valid for the
	 * lifetime of the GEM handle.
	 *
	 * This offset value will always be nonzero, since various HW
	 * units treat 0 specially.
	 */
	__u64 offset;
};
166 
/**
 * struct drm_panfrost_mmap_bo - ioctl argument for mapping Panfrost BOs.
 *
 * This doesn't actually perform an mmap.  Instead, it returns the
 * offset you need to use in an mmap on the DRM device node.  This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_panfrost_mmap_bo {
	/**
	 * @handle: Handle for the object being mapped.
	 */
	__u32 handle;
	/**
	 * @flags: currently not used (should be zero)
	 */
	__u32 flags;
	/**
	 * @offset: offset into the drm node to use for subsequent mmap call.
	 */
	__u64 offset;
};
192 
/*
 * Parameter identifiers accepted in drm_panfrost_get_param::param.
 * New entries must only be appended; existing values are uAPI.
 */
enum drm_panfrost_param {
	DRM_PANFROST_PARAM_GPU_PROD_ID,
	DRM_PANFROST_PARAM_GPU_REVISION,
	DRM_PANFROST_PARAM_SHADER_PRESENT,
	DRM_PANFROST_PARAM_TILER_PRESENT,
	DRM_PANFROST_PARAM_L2_PRESENT,
	DRM_PANFROST_PARAM_STACK_PRESENT,
	DRM_PANFROST_PARAM_AS_PRESENT,
	DRM_PANFROST_PARAM_JS_PRESENT,
	DRM_PANFROST_PARAM_L2_FEATURES,
	DRM_PANFROST_PARAM_CORE_FEATURES,
	DRM_PANFROST_PARAM_TILER_FEATURES,
	DRM_PANFROST_PARAM_MEM_FEATURES,
	DRM_PANFROST_PARAM_MMU_FEATURES,
	DRM_PANFROST_PARAM_THREAD_FEATURES,
	DRM_PANFROST_PARAM_MAX_THREADS,
	DRM_PANFROST_PARAM_THREAD_MAX_WORKGROUP_SZ,
	DRM_PANFROST_PARAM_THREAD_MAX_BARRIER_SZ,
	DRM_PANFROST_PARAM_COHERENCY_FEATURES,
	DRM_PANFROST_PARAM_TEXTURE_FEATURES0,
	DRM_PANFROST_PARAM_TEXTURE_FEATURES1,
	DRM_PANFROST_PARAM_TEXTURE_FEATURES2,
	DRM_PANFROST_PARAM_TEXTURE_FEATURES3,
	DRM_PANFROST_PARAM_JS_FEATURES0,
	DRM_PANFROST_PARAM_JS_FEATURES1,
	DRM_PANFROST_PARAM_JS_FEATURES2,
	DRM_PANFROST_PARAM_JS_FEATURES3,
	DRM_PANFROST_PARAM_JS_FEATURES4,
	DRM_PANFROST_PARAM_JS_FEATURES5,
	DRM_PANFROST_PARAM_JS_FEATURES6,
	DRM_PANFROST_PARAM_JS_FEATURES7,
	DRM_PANFROST_PARAM_JS_FEATURES8,
	DRM_PANFROST_PARAM_JS_FEATURES9,
	DRM_PANFROST_PARAM_JS_FEATURES10,
	DRM_PANFROST_PARAM_JS_FEATURES11,
	DRM_PANFROST_PARAM_JS_FEATURES12,
	DRM_PANFROST_PARAM_JS_FEATURES13,
	DRM_PANFROST_PARAM_JS_FEATURES14,
	DRM_PANFROST_PARAM_JS_FEATURES15,
	DRM_PANFROST_PARAM_NR_CORE_GROUPS,
	DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
	DRM_PANFROST_PARAM_AFBC_FEATURES,
	DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
	DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
	DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
	DRM_PANFROST_PARAM_SELECTED_COHERENCY,
};
240 
/*
 * Coherency protocol values reported through
 * DRM_PANFROST_PARAM_SELECTED_COHERENCY.
 */
enum drm_panfrost_gpu_coherency {
	DRM_PANFROST_GPU_COHERENCY_ACE_LITE = 0,
	DRM_PANFROST_GPU_COHERENCY_ACE = 1,
	DRM_PANFROST_GPU_COHERENCY_NONE = 31,
};
246 
/**
 * struct drm_panfrost_get_param - ioctl argument for querying GPU parameters.
 */
struct drm_panfrost_get_param {
	/** @param: One of enum drm_panfrost_param. */
	__u32 param;
	/** @pad: Padding, must be zero-filled. */
	__u32 pad;
	/** @value: Returned value for the requested parameter. */
	__u64 value;
};
252 
/**
 * struct drm_panfrost_get_bo_offset - ioctl argument for querying a BO's GPU
 * address.
 *
 * Returns the offset for the BO in the GPU address space for this DRM fd.
 * This is the same value returned by drm_panfrost_create_bo, if that was called
 * from this DRM fd.
 */
struct drm_panfrost_get_bo_offset {
	/** @handle: Handle of the object to query. */
	__u32 handle;
	/** @pad: Padding, must be zero-filled. */
	__u32 pad;
	/** @offset: Returned GPU address space offset. */
	__u64 offset;
};
263 
struct drm_panfrost_perfcnt_enable {
	/* Non-zero to enable performance counters, zero to disable. */
	__u32 enable;
	/*
	 * On bifrost we have 2 sets of counters, this parameter defines the
	 * one to track.
	 */
	__u32 counterset;
};
272 
struct drm_panfrost_perfcnt_dump {
	/* User pointer to the buffer receiving the counter dump. */
	__u64 buf_ptr;
};
276 
/* madvise provides a way to tell the kernel in case a buffers contents
 * can be discarded under memory pressure, which is useful for userspace
 * bo cache where we want to optimistically hold on to buffer allocate
 * and potential mmap, but allow the pages to be discarded under memory
 * pressure.
 *
 * Typical usage would involve madvise(DONTNEED) when buffer enters BO
 * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
 * In the WILLNEED case, 'retained' indicates to userspace whether the
 * backing pages still exist.
 */
#define PANFROST_MADV_WILLNEED 0	/* backing pages are needed, status returned in 'retained' */
#define PANFROST_MADV_DONTNEED 1	/* backing pages not needed */

struct drm_panfrost_madvise {
	__u32 handle;         /* in, GEM handle */
	__u32 madv;           /* in, PANFROST_MADV_x */
	__u32 retained;       /* out, whether backing store still exists */
};
296 
/**
 * struct drm_panfrost_set_label_bo - ioctl argument for labelling Panfrost BOs.
 */
struct drm_panfrost_set_label_bo {
	/**
	 * @handle: Handle of the buffer object to label.
	 */
	__u32 handle;
	/**
	 * @pad: Must be zero.
	 */
	__u32 pad;
	/**
	 * @label: User pointer to a NUL-terminated string
	 *
	 * Length cannot be greater than 4096.
	 * NULL is permitted and means clear the label.
	 */
	__u64 label;
};
317 
/* Valid values for drm_panfrost_bo_sync_op::type. */
#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH			0
#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE		1
321 
/**
 * struct drm_panfrost_bo_sync_op - BO map sync op
 *
 * NOTE(review): kerneldoc previously named this "drm_panthor_bo_flush_map_op",
 * which is a different driver's struct; fixed to match the definition below.
 */
struct drm_panfrost_bo_sync_op {
	/** @handle: Handle of the buffer object to sync. */
	__u32 handle;

	/** @type: Type of sync operation (PANFROST_BO_SYNC_*). */
	__u32 type;

	/**
	 * @offset: Offset into the BO at which the sync range starts.
	 *
	 * This will be rounded down to the nearest cache line as needed.
	 */
	__u32 offset;

	/**
	 * @size: Size of the range to sync
	 *
	 * @size + @offset will be rounded up to the nearest cache line as
	 * needed.
	 */
	__u32 size;
};
347 
/**
 * struct drm_panfrost_sync_bo - ioctl argument for syncing BO maps
 */
struct drm_panfrost_sync_bo {
	/** @ops: User pointer to an array of struct drm_panfrost_bo_sync_op. */
	__u64 ops;

	/** @op_count: Number of BO sync ops in the @ops array. */
	__u32 op_count;

	/** @pad: Padding, must be zero-filled. */
	__u32 pad;
};
360 
/** BO comes from a different subsystem. */
#define DRM_PANFROST_BO_IS_IMPORTED (1 << 0)

/**
 * struct drm_panfrost_query_bo_info - ioctl argument for querying BO flags.
 */
struct drm_panfrost_query_bo_info {
	/** @handle: Handle of the object being queried. */
	__u32 handle;

	/** @extra_flags: Extra flags that are not coming from the BO_CREATE ioctl(). */
	__u32 extra_flags;

	/** @create_flags: Flags passed at creation time. */
	__u32 create_flags;

	/** @pad: Will be zero on return. */
	__u32 pad;
};
377 
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0

#define PANFROSTDUMP_MAGIC 0x464E4150 /* PANF */

/* Buffer types stored in panfrost_dump_object_header::type. */
#define PANFROSTDUMP_BUF_REG 0
#define PANFROSTDUMP_BUF_BOMAP (PANFROSTDUMP_BUF_REG + 1)
#define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
#define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
388 
/*
 * This structure is the native endianness of the dumping machine, tools can
 * detect the endianness by looking at the value in 'magic'.
 */
struct panfrost_dump_object_header {
	__u32 magic;		/* PANFROSTDUMP_MAGIC, in dumper's native endianness */
	__u32 type;		/* one of PANFROSTDUMP_BUF_* */
	__u32 file_size;
	__u32 file_offset;

	union {
		struct {
			__u64 jc;
			__u32 gpu_id;
			__u32 major;	/* PANFROSTDUMP_MAJOR */
			__u32 minor;	/* PANFROSTDUMP_MINOR */
			__u64 nbos;
		} reghdr;

		struct {
			__u32 valid;
			__u64 iova;
			__u32 data[2];
		} bomap;

		/*
		 * Force same size in case we want to expand the header
		 * with new fields and also keep it 512-byte aligned
		 */

		__u32 sizer[496];
	};
};
422 
/* Registers object, an array of these */
struct panfrost_dump_registers {
	__u32 reg;	/* register offset */
	__u32 value;	/* register value at dump time */
};
428 
enum drm_panfrost_jm_ctx_priority {
	/**
	 * @PANFROST_JM_CTX_PRIORITY_LOW: Low priority context.
	 */
	PANFROST_JM_CTX_PRIORITY_LOW = 0,

	/**
	 * @PANFROST_JM_CTX_PRIORITY_MEDIUM: Medium priority context.
	 */
	PANFROST_JM_CTX_PRIORITY_MEDIUM,

	/**
	 * @PANFROST_JM_CTX_PRIORITY_HIGH: High priority context.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANFROST_JM_CTX_PRIORITY_HIGH,
};
447 
/**
 * struct drm_panfrost_jm_ctx_create - ioctl argument for creating JM contexts.
 */
struct drm_panfrost_jm_ctx_create {
	/**
	 * @handle: Handle of the created JM context
	 */
	__u32 handle;
	/**
	 * @priority: Context priority (see enum drm_panfrost_jm_ctx_priority).
	 */
	__u32 priority;
};
458 
/**
 * struct drm_panfrost_jm_ctx_destroy - ioctl argument for destroying JM
 * contexts.
 */
struct drm_panfrost_jm_ctx_destroy {
	/**
	 * @handle: Handle of the JM context to destroy.
	 *
	 * Must be a valid context handle returned by DRM_IOCTL_PANFROST_JM_CTX_CREATE.
	 */
	__u32 handle;
	/**
	 * @pad: Padding field, must be zero.
	 */
	__u32 pad;
};
471 
472 #if defined(__cplusplus)
473 }
474 #endif
475 
476 #endif /* _PANFROST_DRM_H_ */
477