/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __UAPI_IVPU_DRM_H__
#define __UAPI_IVPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

#define DRM_IVPU_GET_PARAM  0x00
#define DRM_IVPU_SET_PARAM  0x01
#define DRM_IVPU_BO_CREATE  0x02
#define DRM_IVPU_BO_INFO    0x03
#define DRM_IVPU_SUBMIT     0x05
#define DRM_IVPU_BO_WAIT    0x06

#define DRM_IOCTL_IVPU_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)

/**
 * DOC: contexts
 *
 * VPU contexts have a private virtual address space, job queues and priority.
 * Each context is identified by a unique ID. A context is created on open().
 */

#define DRM_IVPU_PARAM_DEVICE_ID            0
#define DRM_IVPU_PARAM_DEVICE_REVISION      1
#define DRM_IVPU_PARAM_PLATFORM_TYPE        2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE      3
#define DRM_IVPU_PARAM_NUM_CONTEXTS         4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY     6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID           7
#define DRM_IVPU_PARAM_FW_API_VERSION       8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT     9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG          11
#define DRM_IVPU_PARAM_SKU                  12
#define DRM_IVPU_PARAM_CAPABILITIES         13

#define DRM_IVPU_PLATFORM_TYPE_SILICON      0

/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE      0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS     2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3

#define DRM_IVPU_JOB_PRIORITY_DEFAULT  0
#define DRM_IVPU_JOB_PRIORITY_IDLE     1
#define DRM_IVPU_JOB_PRIORITY_NORMAL   2
#define DRM_IVPU_JOB_PRIORITY_FOCUS    3
#define DRM_IVPU_JOB_PRIORITY_REALTIME 4

/**
 * DRM_IVPU_CAP_METRIC_STREAMER
 *
 * Metric streamer support. Provides sampling of various hardware performance
 * metrics like DMA bandwidth and cache misses/hits. Can be used for profiling.
 */
#define DRM_IVPU_CAP_METRIC_STREAMER	1
/**
 * DRM_IVPU_CAP_DMA_MEMORY_RANGE
 *
 * The driver can allocate a separate memory range
 * accessible by hardware DMA.
 */
#define DRM_IVPU_CAP_DMA_MEMORY_RANGE	2

/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or a device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * The engine ID (e.g. %DRM_IVPU_ENGINE_COMPUTE) is passed via @index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_CAPABILITIES:
	 * Supported capabilities (read-only)
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};
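
/**
 * DOC: example: reading a device parameter
 *
 * A minimal, illustrative sketch (not part of the UAPI contract) of reading
 * the PCI device ID with DRM_IOCTL_IVPU_GET_PARAM. The file descriptor fd is
 * assumed to be an already opened VPU device node; error handling is
 * omitted::
 *
 *	struct drm_ivpu_param args = {
 *		.param = DRM_IVPU_PARAM_DEVICE_ID,
 *		.index = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("PCI device ID: 0x%llx\n", (unsigned long long)args.value);
 */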

#define DRM_IVPU_BO_SHAVE_MEM  0x00000001
#define DRM_IVPU_BO_HIGH_MEM   DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE   0x00000002
#define DRM_IVPU_BO_DMA_MEM    0x00000004

#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC         0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_DMA_MEM | \
	 DRM_IVPU_BO_CACHE_MASK)

/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create a GEM buffer object allocated in SHMEM memory.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate the VPU address from the >4 GB range.
	 * A buffer object with a VPU address above 4 GB can always be accessed by
	 * the VPU DMA engine, but some HW generations may not be able to access
	 * this memory from the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on the host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Not supported. Use %DRM_IVPU_BO_WC instead.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use a write-combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};

/**
 * struct drm_ivpu_bo_info - Query buffer object info
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
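
/**
 * DOC: example: creating and mapping a buffer object
 *
 * A minimal, illustrative sketch (not part of the UAPI contract) of
 * allocating a host-mappable BO with DRM_IOCTL_IVPU_BO_CREATE and mapping it
 * through the offset reported by DRM_IOCTL_IVPU_BO_INFO. The file descriptor
 * fd is assumed to be an already opened VPU device node; error handling is
 * omitted::
 *
 *	struct drm_ivpu_bo_create create = {
 *		.size  = 4096,
 *		.flags = DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_MAPPABLE,
 *	};
 *	struct drm_ivpu_bo_info info = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &create);
 *
 *	info.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info);
 *
 *	ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, info.mmap_offset);
 */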

/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY    1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait on job completion using the %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to a u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of the array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for the VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing the commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;

	/**
	 * @priority:
	 *
	 * Priority to be set for the related job command queue, can be one of the following:
	 * %DRM_IVPU_JOB_PRIORITY_DEFAULT
	 * %DRM_IVPU_JOB_PRIORITY_IDLE
	 * %DRM_IVPU_JOB_PRIORITY_NORMAL
	 * %DRM_IVPU_JOB_PRIORITY_FOCUS
	 * %DRM_IVPU_JOB_PRIORITY_REALTIME
	 */
	__u32 priority;
};

/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
#define DRM_IVPU_JOB_STATUS_ABORTED 256

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0 it returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code which is updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS on success or a device specific error code otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
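
/**
 * DOC: example: submitting a job and waiting for completion
 *
 * A minimal, illustrative sketch (not part of the UAPI contract) of submitting
 * a single command buffer and waiting on it. The file descriptor fd, the
 * handle cmd_bo_handle (an already created and filled command buffer) and the
 * absolute deadline deadline_ns are placeholders; the command buffer contents
 * are firmware specific and not shown, and error handling is omitted. After
 * the wait, wait.job_status holds %DRM_IVPU_JOB_STATUS_SUCCESS or a device
 * specific error code::
 *
 *	__u32 handles[1] = { cmd_bo_handle };
 *
 *	struct drm_ivpu_submit submit = {
 *		.buffers_ptr = (__u64)(uintptr_t)handles,
 *		.buffer_count = 1,
 *		.engine = DRM_IVPU_ENGINE_COMPUTE,
 *		.commands_offset = 0,
 *		.priority = DRM_IVPU_JOB_PRIORITY_DEFAULT,
 *	};
 *	struct drm_ivpu_bo_wait wait = {
 *		.handle = cmd_bo_handle,
 *		.timeout_ns = deadline_ns,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &submit);
 *	ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &wait);
 */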

#if defined(__cplusplus)
}
#endif

#endif /* __UAPI_IVPU_DRM_H__ */