/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __UAPI_IVPU_DRM_H__
#define __UAPI_IVPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IVPU_GET_PARAM		  0x00
#define DRM_IVPU_SET_PARAM		  0x01
#define DRM_IVPU_BO_CREATE		  0x02
#define DRM_IVPU_BO_INFO		  0x03
#define DRM_IVPU_SUBMIT			  0x05
#define DRM_IVPU_BO_WAIT		  0x06
#define DRM_IVPU_METRIC_STREAMER_START	  0x07
#define DRM_IVPU_METRIC_STREAMER_STOP	  0x08
#define DRM_IVPU_METRIC_STREAMER_GET_DATA 0x09
#define DRM_IVPU_METRIC_STREAMER_GET_INFO 0x0a

#define DRM_IOCTL_IVPU_GET_PARAM                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM                                               \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT                                                  \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_START                                   \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_START,            \
		 struct drm_ivpu_metric_streamer_start)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_STOP                                    \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_STOP,              \
		struct drm_ivpu_metric_streamer_stop)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA                                \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_DATA,         \
		 struct drm_ivpu_metric_streamer_get_data)

#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO                                \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_INFO,         \
		 struct drm_ivpu_metric_streamer_get_data)

/**
 * DOC: contexts
 *
 * Each VPU context has a private virtual address space, its own job queues and a priority.
 * Each context is identified by a unique ID. A context is created on open().
 */

#define DRM_IVPU_PARAM_DEVICE_ID	    0
#define DRM_IVPU_PARAM_DEVICE_REVISION	    1
#define DRM_IVPU_PARAM_PLATFORM_TYPE	    2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE	    3
#define DRM_IVPU_PARAM_NUM_CONTEXTS	    4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY	    6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID	    7
#define DRM_IVPU_PARAM_FW_API_VERSION	    8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT	    9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG	    11
#define DRM_IVPU_PARAM_SKU		    12
#define DRM_IVPU_PARAM_CAPABILITIES	    13

#define DRM_IVPU_PLATFORM_TYPE_SILICON	    0

/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE	    0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS	    2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3

#define DRM_IVPU_JOB_PRIORITY_DEFAULT  0
#define DRM_IVPU_JOB_PRIORITY_IDLE     1
#define DRM_IVPU_JOB_PRIORITY_NORMAL   2
#define DRM_IVPU_JOB_PRIORITY_FOCUS    3
#define DRM_IVPU_JOB_PRIORITY_REALTIME 4

/**
 * DRM_IVPU_CAP_METRIC_STREAMER
 *
 * Metric streamer support. Provides sampling of various hardware performance
 * metrics, such as DMA bandwidth and cache misses/hits. Can be used for profiling.
 */
#define DRM_IVPU_CAP_METRIC_STREAMER	1
/**
 * DRM_IVPU_CAP_DMA_MEMORY_RANGE
 *
 * The driver can allocate a separate memory range
 * accessible by hardware DMA.
 */
#define DRM_IVPU_CAP_DMA_MEMORY_RANGE	2
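
/*
 * Example (illustrative sketch, not part of the UAPI): probing for the metric
 * streamer capability. Passing the capability ID through @index while @param is
 * %DRM_IVPU_PARAM_CAPABILITIES is an assumption based on the @index field
 * description below; fd is an already-open device file descriptor and error
 * handling is reduced to checking the ioctl return value.
 *
 *	struct drm_ivpu_param args = {
 *		.param = DRM_IVPU_PARAM_CAPABILITIES,
 *		.index = DRM_IVPU_CAP_METRIC_STREAMER,
 *	};
 *	int has_metric_streamer =
 *		ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0 && args.value != 0;
 */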

/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * The engine ID (e.g. %DRM_IVPU_ENGINE_COMPUTE) is passed via @index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_CAPABILITIES:
	 * Supported capabilities (read-only)
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};
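
/*
 * Example (illustrative sketch, not part of the UAPI): reading the PCI device
 * ID with %DRM_IOCTL_IVPU_GET_PARAM. The device node path is an assumption
 * (accel devices are typically exposed under /dev/accel/); error handling is
 * reduced to checking the return values.
 *
 *	int fd = open("/dev/accel/accel0", O_RDWR);
 *	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
 *
 *	if (fd >= 0 && ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("VPU PCI device ID: 0x%llx\n", (unsigned long long)args.value);
 */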

#define DRM_IVPU_BO_SHAVE_MEM  0x00000001
#define DRM_IVPU_BO_HIGH_MEM   DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE   0x00000002
#define DRM_IVPU_BO_DMA_MEM    0x00000004

#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC	       0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_DMA_MEM | \
	 DRM_IVPU_BO_CACHE_MASK)

/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create a GEM buffer object allocated in SHMEM memory.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate the VPU address from the >4GB range.
	 * A buffer object with a VPU address >4GB can always be accessed by the
	 * VPU DMA engine, but on some HW generations it may not be accessible to
	 * the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on the host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Not supported. Use DRM_IVPU_BO_WC instead.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use a write combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};
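
/*
 * Example (illustrative sketch, not part of the UAPI): creating a 4 KiB,
 * CPU-mappable, host-cached buffer object. fd is an already-open device file
 * descriptor; the size and flag choices are arbitrary for illustration.
 *
 *	struct drm_ivpu_bo_create args = {
 *		.size = 4096,
 *		.flags = DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_MAPPABLE | DRM_IVPU_BO_CACHED,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) == 0)
 *		printf("handle %u at VPU address 0x%llx\n",
 *		       args.handle, (unsigned long long)args.vpu_addr);
 */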

/**
 * struct drm_ivpu_bo_info - Query buffer object info
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
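
/*
 * Example (illustrative sketch, not part of the UAPI): querying a BO created
 * with %DRM_IVPU_BO_MAPPABLE and mapping it into the CPU address space.
 * fd and bo_handle are assumed to come from earlier open() and
 * %DRM_IOCTL_IVPU_BO_CREATE calls; error handling is omitted.
 *
 *	struct drm_ivpu_bo_info info = { .handle = bo_handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info) == 0 && info.mmap_offset)
 *		ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, info.mmap_offset);
 */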

/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY    1 /* Deprecated */

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait for job completion using the %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to a u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of the array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;

	/**
	 * @priority:
	 *
	 * Priority to be set for the related job command queue; can be one of the following:
	 * %DRM_IVPU_JOB_PRIORITY_DEFAULT
	 * %DRM_IVPU_JOB_PRIORITY_IDLE
	 * %DRM_IVPU_JOB_PRIORITY_NORMAL
	 * %DRM_IVPU_JOB_PRIORITY_FOCUS
	 * %DRM_IVPU_JOB_PRIORITY_REALTIME
	 */
	__u32 priority;
};
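
/*
 * Example (illustrative sketch, not part of the UAPI): submitting a job whose
 * command buffer references one additional BO. fd, cmd_bo_handle and
 * data_bo_handle are assumed to come from earlier ioctl calls; the commands
 * are assumed to start at offset 0 of the command buffer.
 *
 *	__u32 handles[2] = { cmd_bo_handle, data_bo_handle };
 *	struct drm_ivpu_submit args = {
 *		.buffers_ptr = (__u64)(uintptr_t)handles,
 *		.buffer_count = 2,
 *		.engine = DRM_IVPU_ENGINE_COMPUTE,
 *		.commands_offset = 0,
 *		.priority = DRM_IVPU_JOB_PRIORITY_DEFAULT,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args);
 */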

/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
#define DRM_IVPU_JOB_STATUS_ABORTED 256

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0 it returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code, updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS on success, a device specific error code otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
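
/*
 * Example (illustrative sketch, not part of the UAPI): waiting for the command
 * buffer submitted above and checking the job status. @timeout_ns is absolute;
 * now_ns is a placeholder for the current time in nanoseconds and the one
 * second deadline is arbitrary.
 *
 *	struct drm_ivpu_bo_wait args = {
 *		.handle = cmd_bo_handle,
 *		.timeout_ns = now_ns + 1000000000,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &args) == 0 &&
 *	    args.job_status == DRM_IVPU_JOB_STATUS_SUCCESS)
 *		puts("job completed successfully");
 */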

/**
 * struct drm_ivpu_metric_streamer_start - Start collecting metric data
 */
struct drm_ivpu_metric_streamer_start {
	/** @metric_group_mask: Indicates metric streamer instance */
	__u64 metric_group_mask;
	/** @sampling_period_ns: Sampling period in nanoseconds */
	__u64 sampling_period_ns;
	/**
	 * @read_period_samples:
	 *
	 * Number of samples after which user space will try to read the data.
	 * Reading the data after a significantly longer period may cause data loss.
	 */
	__u32 read_period_samples;
	/** @sample_size: Returned size of a single sample in bytes */
	__u32 sample_size;
	/** @max_data_size: Returned max @data_size from %DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA */
	__u32 max_data_size;
};
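
/*
 * Example (illustrative sketch, not part of the UAPI): starting a metric
 * streamer instance. The group mask, sampling period and read period below are
 * arbitrary placeholder values; on success the kernel fills in @sample_size
 * and @max_data_size.
 *
 *	struct drm_ivpu_metric_streamer_start args = {
 *		.metric_group_mask = 0x1,
 *		.sampling_period_ns = 1000000,
 *		.read_period_samples = 100,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_START, &args);
 */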

/**
 * struct drm_ivpu_metric_streamer_get_data - Copy collected metric data
 */
struct drm_ivpu_metric_streamer_get_data {
	/** @metric_group_mask: Indicates metric streamer instance */
	__u64 metric_group_mask;
	/** @buffer_ptr: A pointer to a destination for the copied data */
	__u64 buffer_ptr;
	/** @buffer_size: Size of the destination buffer */
	__u64 buffer_size;
	/**
	 * @data_size: Returned size of the copied metric data
	 *
	 * If @buffer_size is zero, returns the amount of data ready to be copied.
	 */
	__u64 data_size;
};
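
/*
 * Example (illustrative sketch, not part of the UAPI): the two-step read
 * pattern suggested by @data_size. The first call with @buffer_size == 0
 * queries how much data is pending; the second call copies it into a buffer
 * allocated by user space. fd and the group mask are assumptions carried over
 * from the start example; error and short-read handling are omitted.
 *
 *	struct drm_ivpu_metric_streamer_get_data args = {
 *		.metric_group_mask = 0x1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA, &args) == 0) {
 *		void *buf = malloc(args.data_size);
 *
 *		args.buffer_ptr = (__u64)(uintptr_t)buf;
 *		args.buffer_size = args.data_size;
 *		ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA, &args);
 *	}
 */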

/**
 * struct drm_ivpu_metric_streamer_stop - Stop collecting metric data
 */
struct drm_ivpu_metric_streamer_stop {
	/** @metric_group_mask: Indicates metric streamer instance */
	__u64 metric_group_mask;
};
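
/*
 * Example (illustrative sketch, not part of the UAPI): stopping the streamer
 * instance started above; the group mask is assumed to match the one used at
 * start time.
 *
 *	struct drm_ivpu_metric_streamer_stop args = {
 *		.metric_group_mask = 0x1,
 *	};
 *	ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_STOP, &args);
 */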

#if defined(__cplusplus)
}
#endif

#endif /* __UAPI_IVPU_DRM_H__ */