xref: /linux/include/uapi/drm/ivpu_accel.h (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
1 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2 /*
3  * Copyright (C) 2020-2024 Intel Corporation
4  */
5 
6 #ifndef __UAPI_IVPU_DRM_H__
7 #define __UAPI_IVPU_DRM_H__
8 
9 #include "drm.h"
10 
11 #if defined(__cplusplus)
12 extern "C" {
13 #endif
14 
#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

/*
 * Driver-private ioctl numbers; each is added to DRM_COMMAND_BASE in the
 * DRM_IOCTL_IVPU_* request definitions below.
 *
 * NOTE(review): 0x04 is skipped (gap between BO_INFO and SUBMIT) --
 * presumably a retired ioctl; keep the gap so the UAPI numbering of the
 * remaining ioctls stays stable. Confirm before reusing 0x04.
 */
#define DRM_IVPU_GET_PARAM		  0x00
#define DRM_IVPU_SET_PARAM		  0x01
#define DRM_IVPU_BO_CREATE		  0x02
#define DRM_IVPU_BO_INFO		  0x03
#define DRM_IVPU_SUBMIT			  0x05
#define DRM_IVPU_BO_WAIT		  0x06
#define DRM_IVPU_METRIC_STREAMER_START	  0x07
#define DRM_IVPU_METRIC_STREAMER_STOP	  0x08
#define DRM_IVPU_METRIC_STREAMER_GET_DATA 0x09
#define DRM_IVPU_METRIC_STREAMER_GET_INFO 0x0a
28 
/*
 * Full ioctl request codes. The transfer direction (_IOW/_IOWR) and the
 * argument struct size are encoded into the request number, so they are
 * part of the stable UAPI and must not change.
 */
#define DRM_IOCTL_IVPU_GET_PARAM                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM                                               \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT                                                  \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_START                                   \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_START,            \
		 struct drm_ivpu_metric_streamer_start)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_STOP                                    \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_STOP,              \
		struct drm_ivpu_metric_streamer_stop)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA                                \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_DATA,         \
		 struct drm_ivpu_metric_streamer_get_data)

/*
 * NOTE(review): GET_INFO reuses struct drm_ivpu_metric_streamer_get_data
 * rather than a dedicated "get_info" struct. This appears intentional (the
 * info query shares the data-copy argument layout) -- confirm it is not a
 * copy/paste mistake before relying on it.
 */
#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO                                \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_INFO,         \
		 struct drm_ivpu_metric_streamer_get_data)
62 
63 /**
64  * DOC: contexts
65  *
 * VPU contexts have private virtual address space, job queues and priority.
 * Each context is identified by a unique ID. Context is created on open().
68  */
69 
/*
 * Parameter IDs for %DRM_IOCTL_IVPU_GET_PARAM / %DRM_IOCTL_IVPU_SET_PARAM.
 * Per-parameter semantics and read/write access are documented in the
 * kernel-doc of struct drm_ivpu_param below.
 */
#define DRM_IVPU_PARAM_DEVICE_ID	    0
#define DRM_IVPU_PARAM_DEVICE_REVISION	    1
#define DRM_IVPU_PARAM_PLATFORM_TYPE	    2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE	    3
#define DRM_IVPU_PARAM_NUM_CONTEXTS	    4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY	    6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID	    7
#define DRM_IVPU_PARAM_FW_API_VERSION	    8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT	    9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG	    11
#define DRM_IVPU_PARAM_SKU		    12
#define DRM_IVPU_PARAM_CAPABILITIES	    13

/* Value returned by %DRM_IVPU_PARAM_PLATFORM_TYPE on real hardware */
#define DRM_IVPU_PLATFORM_TYPE_SILICON	    0

/* Deprecated, use the DRM_IVPU_JOB_PRIORITY_* values instead */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE	    0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS	    2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3

/*
 * Job priorities accepted in drm_ivpu_submit.priority.
 * Note the values are shifted by one relative to the deprecated
 * CONTEXT_PRIORITY set to make room for DEFAULT (0).
 */
#define DRM_IVPU_JOB_PRIORITY_DEFAULT  0
#define DRM_IVPU_JOB_PRIORITY_IDLE     1
#define DRM_IVPU_JOB_PRIORITY_NORMAL   2
#define DRM_IVPU_JOB_PRIORITY_FOCUS    3
#define DRM_IVPU_JOB_PRIORITY_REALTIME 4

/**
 * DRM_IVPU_CAP_METRIC_STREAMER
 *
 * Metric streamer support. Provides sampling of various hardware performance
 * metrics like DMA bandwidth and cache miss/hits. Can be used for profiling.
 * Reported via %DRM_IVPU_PARAM_CAPABILITIES.
 */
#define DRM_IVPU_CAP_METRIC_STREAMER	1
/**
 * DRM_IVPU_CAP_DMA_MEMORY_RANGE
 *
 * Driver has capability to allocate separate memory range
 * accessible by hardware DMA.
 * Reported via %DRM_IVPU_PARAM_CAPABILITIES.
 */
#define DRM_IVPU_CAP_DMA_MEMORY_RANGE	2
113 
/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 *
 * Argument for %DRM_IOCTL_IVPU_GET_PARAM and %DRM_IOCTL_IVPU_SET_PARAM.
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * Engine ID (i.e. %DRM_IVPU_ENGINE_COMPUTE) is given via @index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_CAPABILITIES:
	 * Supported capabilities (read-only).
	 * See the DRM_IVPU_CAP_* defines; @index selects the queried capability.
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value (returned on get, supplied on set) */
	__u64 value;
};
172 
/* BO memory placement flags (low 16 bits) */
#define DRM_IVPU_BO_SHAVE_MEM  0x00000001
#define DRM_IVPU_BO_HIGH_MEM   DRM_IVPU_BO_SHAVE_MEM /* preferred name for SHAVE_MEM */
#define DRM_IVPU_BO_MAPPABLE   0x00000002
#define DRM_IVPU_BO_DMA_MEM    0x00000004

/*
 * Host-side caching mode: a 2-bit field (bits 16-17), not independent flags.
 * %DRM_IVPU_BO_CACHED (0) is the default; %DRM_IVPU_BO_UNCACHED is not
 * supported -- use %DRM_IVPU_BO_WC instead (see drm_ivpu_bo_create).
 */
#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC	       0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

/* All flag bits accepted by %DRM_IOCTL_IVPU_BO_CREATE */
#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_DMA_MEM | \
	 DRM_IVPU_BO_CACHE_MASK)
188 
/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create GEM buffer object allocated in SHMEM memory.
 * Argument for %DRM_IOCTL_IVPU_BO_CREATE.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate VPU address from >4GB range.
	 * Buffer object with vpu address >4GB can be always accessed by the
	 * VPU DMA engine, but some HW generation may not be able to access
	 * this memory from the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Not supported. Use DRM_IVPU_BO_WC instead.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use write combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};
237 
/**
 * struct drm_ivpu_bo_info - Query buffer object info
 *
 * Argument for %DRM_IOCTL_IVPU_BO_INFO.
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned DRM_IVPU_BO_* flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
261 
/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY    1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait on job completion using %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to an u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of the array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;

	/**
	 * @priority:
	 *
	 * Priority to be set for related job command queue, can be one of the following:
	 * %DRM_IVPU_JOB_PRIORITY_DEFAULT
	 * %DRM_IVPU_JOB_PRIORITY_IDLE
	 * %DRM_IVPU_JOB_PRIORITY_NORMAL
	 * %DRM_IVPU_JOB_PRIORITY_FOCUS
	 * %DRM_IVPU_JOB_PRIORITY_REALTIME
	 */
	__u32 priority;
};
325 
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
#define DRM_IVPU_JOB_STATUS_ABORTED 256

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0 returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code which is updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS or device specific error otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
358 
/**
 * struct drm_ivpu_metric_streamer_start - Start collecting metric data
 *
 * Argument for %DRM_IOCTL_IVPU_METRIC_STREAMER_START.
 */
struct drm_ivpu_metric_streamer_start {
	/** @metric_group_mask: Indicates metric streamer instance */
	__u64 metric_group_mask;
	/** @sampling_period_ns: Sampling period in nanoseconds */
	__u64 sampling_period_ns;
	/**
	 * @read_period_samples:
	 *
	 * Number of samples after which user space will try to read the data.
	 * Reading the data after significantly longer period may cause data loss.
	 */
	__u32 read_period_samples;
	/** @sample_size: Returned size of a single sample in bytes */
	__u32 sample_size;
	/** @max_data_size: Returned max @data_size from %DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA */
	__u32 max_data_size;
};
379 
/**
 * struct drm_ivpu_metric_streamer_get_data - Copy collected metric data
 *
 * Argument for both %DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA and
 * %DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO (the two ioctls share this layout).
 */
struct drm_ivpu_metric_streamer_get_data {
	/** @metric_group_mask: Indicates metric streamer instance */
	__u64 metric_group_mask;
	/** @buffer_ptr: A pointer to a destination for the copied data */
	__u64 buffer_ptr;
	/** @buffer_size: Size of the destination buffer */
	__u64 buffer_size;
	/**
	 * @data_size: Returned size of copied metric data
	 *
	 * If the @buffer_size is zero, returns the amount of data ready to be copied.
	 */
	__u64 data_size;
};
397 
/**
 * struct drm_ivpu_metric_streamer_stop - Stop collecting metric data
 *
 * Argument for %DRM_IOCTL_IVPU_METRIC_STREAMER_STOP.
 */
struct drm_ivpu_metric_streamer_stop {
	/** @metric_group_mask: Indicates metric streamer instance */
	__u64 metric_group_mask;
};
405 
406 #if defined(__cplusplus)
407 }
408 #endif
409 
410 #endif /* __UAPI_IVPU_DRM_H__ */
411