1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright © 2023 Intel Corporation 4 */ 5 6 #ifndef _UAPI_XE_DRM_H_ 7 #define _UAPI_XE_DRM_H_ 8 9 #include "drm.h" 10 11 #if defined(__cplusplus) 12 extern "C" { 13 #endif 14 15 /* 16 * Please note that modifications to all structs defined here are 17 * subject to backwards-compatibility constraints. 18 * Sections in this file are organized as follows: 19 * 1. IOCTL definition 20 * 2. Extension definition and helper structs 21 * 3. IOCTL's Query structs in the order of the Query's entries. 22 * 4. The rest of IOCTL structs in the order of IOCTL declaration. 23 */ 24 25 /** 26 * DOC: Xe Device Block Diagram 27 * 28 * The diagram below represents a high-level simplification of a discrete 29 * GPU supported by the Xe driver. It shows some device components which 30 * are necessary to understand this API, as well as how their relations 31 * to each other. This diagram does not represent real hardware:: 32 * 33 * ┌──────────────────────────────────────────────────────────────────┐ 34 * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │ 35 * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │ 36 * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │ 37 * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │ 38 * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │ 39 * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │ 40 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ 41 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │ 42 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ 43 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ 44 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │ 45 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ 46 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ 47 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │ 48 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ 49 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ 50 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │ 51 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ 52 * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │ 53 * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │ 54 * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │ 55 * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │ 56 * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │ 57 * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │ 58 * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │ 59 * └─────────────────────────────Device0───────┬──────────────────────┘ 60 * │ 61 * ───────────────────────┴────────── PCI bus 62 */ 63 64 /** 65 * DOC: Xe uAPI Overview 66 * 67 * This section aims to describe the Xe's IOCTL entries, its structs, and other 68 * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related 69 * entries and usage. 70 * 71 * List of supported IOCTLs: 72 * - &DRM_IOCTL_XE_DEVICE_QUERY 73 * - &DRM_IOCTL_XE_GEM_CREATE 74 * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET 75 * - &DRM_IOCTL_XE_VM_CREATE 76 * - &DRM_IOCTL_XE_VM_DESTROY 77 * - &DRM_IOCTL_XE_VM_BIND 78 * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE 79 * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY 80 * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY 81 * - &DRM_IOCTL_XE_EXEC 82 * - &DRM_IOCTL_XE_WAIT_USER_FENCE 83 * - &DRM_IOCTL_XE_OBSERVATION 84 */ 85 86 /* 87 * xe specific ioctls. 88 * 89 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie 90 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset 91 * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 92 */ 93 #define DRM_XE_DEVICE_QUERY 0x00 94 #define DRM_XE_GEM_CREATE 0x01 95 #define DRM_XE_GEM_MMAP_OFFSET 0x02 96 #define DRM_XE_VM_CREATE 0x03 97 #define DRM_XE_VM_DESTROY 0x04 98 #define DRM_XE_VM_BIND 0x05 99 #define DRM_XE_EXEC_QUEUE_CREATE 0x06 100 #define DRM_XE_EXEC_QUEUE_DESTROY 0x07 101 #define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08 102 #define DRM_XE_EXEC 0x09 103 #define DRM_XE_WAIT_USER_FENCE 0x0a 104 #define DRM_XE_OBSERVATION 0x0b 105 106 /* Must be kept compact -- no holes */ 107 108 #define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query) 109 #define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create) 110 #define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset) 111 #define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create) 112 #define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy) 113 #define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind) 114 #define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create) 115 #define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy) 116 #define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) 117 #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) 118 #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) 119 #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param) 120 121 /** 122 * DOC: Xe IOCTL Extensions 123 * 124 * Before detailing the IOCTLs and its structs, it is important to highlight 125 * that every IOCTL in Xe is extensible. 126 * 127 * Many interfaces need to grow over time. In most cases we can simply 128 * extend the struct and have userspace pass in more data. Another option, 129 * as demonstrated by Vulkan's approach to providing extensions for forward 130 * and backward compatibility, is to use a list of optional structs to 131 * provide those extra details. 132 * 133 * The key advantage to using an extension chain is that it allows us to 134 * redefine the interface more easily than an ever growing struct of 135 * increasing complexity, and for large parts of that interface to be 136 * entirely optional. The downside is more pointer chasing; chasing across 137 * the __user boundary with pointers encapsulated inside u64. 138 * 139 * Example chaining: 140 * 141 * .. code-block:: C 142 * 143 * struct drm_xe_user_extension ext3 { 144 * .next_extension = 0, // end 145 * .name = ..., 146 * }; 147 * struct drm_xe_user_extension ext2 { 148 * .next_extension = (uintptr_t)&ext3, 149 * .name = ..., 150 * }; 151 * struct drm_xe_user_extension ext1 { 152 * .next_extension = (uintptr_t)&ext2, 153 * .name = ..., 154 * }; 155 * 156 * Typically the struct drm_xe_user_extension would be embedded in some uAPI 157 * struct, and in this case we would feed it the head of the chain(i.e ext1), 158 * which would then apply all of the above extensions. 159 */ 160 161 /** 162 * struct drm_xe_user_extension - Base class for defining a chain of extensions 163 */ 164 struct drm_xe_user_extension { 165 /** 166 * @next_extension: 167 * 168 * Pointer to the next struct drm_xe_user_extension, or zero if the end. 169 */ 170 __u64 next_extension; 171 172 /** 173 * @name: Name of the extension. 174 * 175 * Note that the name here is just some integer. 176 * 177 * Also note that the name space for this is not global for the whole 178 * driver, but rather its scope/meaning is limited to the specific piece 179 * of uAPI which has embedded the struct drm_xe_user_extension. 180 */ 181 __u32 name; 182 183 /** 184 * @pad: MBZ 185 * 186 * All undefined bits must be zero. 187 */ 188 __u32 pad; 189 }; 190 191 /** 192 * struct drm_xe_ext_set_property - Generic set property extension 193 * 194 * A generic struct that allows any of the Xe's IOCTL to be extended 195 * with a set_property operation. 196 */ 197 struct drm_xe_ext_set_property { 198 /** @base: base user extension */ 199 struct drm_xe_user_extension base; 200 201 /** @property: property to set */ 202 __u32 property; 203 204 /** @pad: MBZ */ 205 __u32 pad; 206 207 /** @value: property value */ 208 __u64 value; 209 210 /** @reserved: Reserved */ 211 __u64 reserved[2]; 212 }; 213 214 /** 215 * struct drm_xe_engine_class_instance - instance of an engine class 216 * 217 * It is returned as part of the @drm_xe_engine, but it also is used as 218 * the input of engine selection for both @drm_xe_exec_queue_create and 219 * @drm_xe_query_engine_cycles 220 * 221 * The @engine_class can be: 222 * - %DRM_XE_ENGINE_CLASS_RENDER 223 * - %DRM_XE_ENGINE_CLASS_COPY 224 * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE 225 * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 226 * - %DRM_XE_ENGINE_CLASS_COMPUTE 227 * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual 228 * hardware engine class). Used for creating ordered queues of VM 229 * bind operations. 230 */ 231 struct drm_xe_engine_class_instance { 232 #define DRM_XE_ENGINE_CLASS_RENDER 0 233 #define DRM_XE_ENGINE_CLASS_COPY 1 234 #define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2 235 #define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3 236 #define DRM_XE_ENGINE_CLASS_COMPUTE 4 237 #define DRM_XE_ENGINE_CLASS_VM_BIND 5 238 /** @engine_class: engine class id */ 239 __u16 engine_class; 240 /** @engine_instance: engine instance id */ 241 __u16 engine_instance; 242 /** @gt_id: Unique ID of this GT within the PCI Device */ 243 __u16 gt_id; 244 /** @pad: MBZ */ 245 __u16 pad; 246 }; 247 248 /** 249 * struct drm_xe_engine - describe hardware engine 250 */ 251 struct drm_xe_engine { 252 /** @instance: The @drm_xe_engine_class_instance */ 253 struct drm_xe_engine_class_instance instance; 254 255 /** @reserved: Reserved */ 256 __u64 reserved[3]; 257 }; 258 259 /** 260 * struct drm_xe_query_engines - describe engines 261 * 262 * If a query is made with a struct @drm_xe_device_query where .query 263 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of 264 * struct @drm_xe_query_engines in .data. 265 */ 266 struct drm_xe_query_engines { 267 /** @num_engines: number of engines returned in @engines */ 268 __u32 num_engines; 269 /** @pad: MBZ */ 270 __u32 pad; 271 /** @engines: The returned engines for this device */ 272 struct drm_xe_engine engines[]; 273 }; 274 275 /** 276 * enum drm_xe_memory_class - Supported memory classes. 277 */ 278 enum drm_xe_memory_class { 279 /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ 280 DRM_XE_MEM_REGION_CLASS_SYSMEM = 0, 281 /** 282 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this 283 * represents the memory that is local to the device, which we 284 * call VRAM. Not valid on integrated platforms. 285 */ 286 DRM_XE_MEM_REGION_CLASS_VRAM 287 }; 288 289 /** 290 * struct drm_xe_mem_region - Describes some region as known to 291 * the driver. 292 */ 293 struct drm_xe_mem_region { 294 /** 295 * @mem_class: The memory class describing this region. 296 * 297 * See enum drm_xe_memory_class for supported values. 298 */ 299 __u16 mem_class; 300 /** 301 * @instance: The unique ID for this region, which serves as the 302 * index in the placement bitmask used as argument for 303 * &DRM_IOCTL_XE_GEM_CREATE 304 */ 305 __u16 instance; 306 /** 307 * @min_page_size: Min page-size in bytes for this region. 308 * 309 * When the kernel allocates memory for this region, the 310 * underlying pages will be at least @min_page_size in size. 311 * Buffer objects with an allowable placement in this region must be 312 * created with a size aligned to this value. 313 * GPU virtual address mappings of (parts of) buffer objects that 314 * may be placed in this region must also have their GPU virtual 315 * address and range aligned to this value. 316 * Affected IOCTLS will return %-EINVAL if alignment restrictions are 317 * not met. 318 */ 319 __u32 min_page_size; 320 /** 321 * @total_size: The usable size in bytes for this region. 322 */ 323 __u64 total_size; 324 /** 325 * @used: Estimate of the memory used in bytes for this region. 326 * 327 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable 328 * accounting. Without this the value here will always equal 329 * zero. 330 */ 331 __u64 used; 332 /** 333 * @cpu_visible_size: How much of this region can be CPU 334 * accessed, in bytes. 335 * 336 * This will always be <= @total_size, and the remainder (if 337 * any) will not be CPU accessible. If the CPU accessible part 338 * is smaller than @total_size then this is referred to as a 339 * small BAR system. 340 * 341 * On systems without small BAR (full BAR), the probed_size will 342 * always equal the @total_size, since all of it will be CPU 343 * accessible. 344 * 345 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM 346 * regions (for other types the value here will always equal 347 * zero). 348 */ 349 __u64 cpu_visible_size; 350 /** 351 * @cpu_visible_used: Estimate of CPU visible memory used, in 352 * bytes. 353 * 354 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable 355 * accounting. Without this the value here will always equal 356 * zero. Note this is only currently tracked for 357 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value 358 * here will always be zero). 359 */ 360 __u64 cpu_visible_used; 361 /** @reserved: Reserved */ 362 __u64 reserved[6]; 363 }; 364 365 /** 366 * struct drm_xe_query_mem_regions - describe memory regions 367 * 368 * If a query is made with a struct drm_xe_device_query where .query 369 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses 370 * struct drm_xe_query_mem_regions in .data. 371 */ 372 struct drm_xe_query_mem_regions { 373 /** @num_mem_regions: number of memory regions returned in @mem_regions */ 374 __u32 num_mem_regions; 375 /** @pad: MBZ */ 376 __u32 pad; 377 /** @mem_regions: The returned memory regions for this device */ 378 struct drm_xe_mem_region mem_regions[]; 379 }; 380 381 /** 382 * struct drm_xe_query_config - describe the device configuration 383 * 384 * If a query is made with a struct drm_xe_device_query where .query 385 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses 386 * struct drm_xe_query_config in .data. 387 * 388 * The index in @info can be: 389 * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits) 390 * and the device revision (next 8 bits) 391 * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device 392 * configuration, see list below 393 * 394 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device 395 * has usable VRAM 396 * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment 397 * required by this device, typically SZ_4K or SZ_64K 398 * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address 399 * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest 400 * available exec queue priority 401 */ 402 struct drm_xe_query_config { 403 /** @num_params: number of parameters returned in info */ 404 __u32 num_params; 405 406 /** @pad: MBZ */ 407 __u32 pad; 408 409 #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 410 #define DRM_XE_QUERY_CONFIG_FLAGS 1 411 #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0) 412 #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2 413 #define DRM_XE_QUERY_CONFIG_VA_BITS 3 414 #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4 415 /** @info: array of elements containing the config info */ 416 __u64 info[]; 417 }; 418 419 /** 420 * struct drm_xe_gt - describe an individual GT. 421 * 422 * To be used with drm_xe_query_gt_list, which will return a list with all the 423 * existing GT individual descriptions. 424 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for 425 * implementing graphics and/or media operations. 426 * 427 * The index in @type can be: 428 * - %DRM_XE_QUERY_GT_TYPE_MAIN 429 * - %DRM_XE_QUERY_GT_TYPE_MEDIA 430 */ 431 struct drm_xe_gt { 432 #define DRM_XE_QUERY_GT_TYPE_MAIN 0 433 #define DRM_XE_QUERY_GT_TYPE_MEDIA 1 434 /** @type: GT type: Main or Media */ 435 __u16 type; 436 /** @tile_id: Tile ID where this GT lives (Information only) */ 437 __u16 tile_id; 438 /** @gt_id: Unique ID of this GT within the PCI Device */ 439 __u16 gt_id; 440 /** @pad: MBZ */ 441 __u16 pad[3]; 442 /** @reference_clock: A clock frequency for timestamp */ 443 __u32 reference_clock; 444 /** 445 * @near_mem_regions: Bit mask of instances from 446 * drm_xe_query_mem_regions that are nearest to the current engines 447 * of this GT. 448 * Each index in this mask refers directly to the struct 449 * drm_xe_query_mem_regions' instance, no assumptions should 450 * be made about order. The type of each region is described 451 * by struct drm_xe_query_mem_regions' mem_class. 452 */ 453 __u64 near_mem_regions; 454 /** 455 * @far_mem_regions: Bit mask of instances from 456 * drm_xe_query_mem_regions that are far from the engines of this GT. 457 * In general, they have extra indirections when compared to the 458 * @near_mem_regions. For a discrete device this could mean system 459 * memory and memory living in a different tile. 460 * Each index in this mask refers directly to the struct 461 * drm_xe_query_mem_regions' instance, no assumptions should 462 * be made about order. The type of each region is described 463 * by struct drm_xe_query_mem_regions' mem_class. 464 */ 465 __u64 far_mem_regions; 466 /** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */ 467 __u16 ip_ver_major; 468 /** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */ 469 __u16 ip_ver_minor; 470 /** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */ 471 __u16 ip_ver_rev; 472 /** @pad2: MBZ */ 473 __u16 pad2; 474 /** @reserved: Reserved */ 475 __u64 reserved[7]; 476 }; 477 478 /** 479 * struct drm_xe_query_gt_list - A list with GT description items. 480 * 481 * If a query is made with a struct drm_xe_device_query where .query 482 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct 483 * drm_xe_query_gt_list in .data. 484 */ 485 struct drm_xe_query_gt_list { 486 /** @num_gt: number of GT items returned in gt_list */ 487 __u32 num_gt; 488 /** @pad: MBZ */ 489 __u32 pad; 490 /** @gt_list: The GT list returned for this device */ 491 struct drm_xe_gt gt_list[]; 492 }; 493 494 /** 495 * struct drm_xe_query_topology_mask - describe the topology mask of a GT 496 * 497 * This is the hardware topology which reflects the internal physical 498 * structure of the GPU. 499 * 500 * If a query is made with a struct drm_xe_device_query where .query 501 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses 502 * struct drm_xe_query_topology_mask in .data. 503 * 504 * The @type can be: 505 * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices 506 * (DSS) available for geometry operations. For example a query response 507 * containing the following in mask: 508 * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00`` 509 * means 32 DSS are available for geometry. 510 * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices 511 * (DSS) available for compute operations. For example a query response 512 * containing the following in mask: 513 * ``DSS_COMPUTE ff ff ff ff 00 00 00 00`` 514 * means 32 DSS are available for compute. 515 * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type 516 * may be omitted if the driver is unable to query the mask from the 517 * hardware. 518 * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU) 519 * available per Dual Sub Slices (DSS). For example a query response 520 * containing the following in mask: 521 * ``EU_PER_DSS ff ff 00 00 00 00 00 00`` 522 * means each DSS has 16 SIMD8 EUs. This type may be omitted if device 523 * doesn't have SIMD8 EUs. 524 * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution 525 * Units (EU) available per Dual Sub Slices (DSS). For example a query 526 * response containing the following in mask: 527 * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00`` 528 * means each DSS has 16 SIMD16 EUs. This type may be omitted if device 529 * doesn't have SIMD16 EUs. 530 */ 531 struct drm_xe_query_topology_mask { 532 /** @gt_id: GT ID the mask is associated with */ 533 __u16 gt_id; 534 535 #define DRM_XE_TOPO_DSS_GEOMETRY 1 536 #define DRM_XE_TOPO_DSS_COMPUTE 2 537 #define DRM_XE_TOPO_L3_BANK 3 538 #define DRM_XE_TOPO_EU_PER_DSS 4 539 #define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5 540 /** @type: type of mask */ 541 __u16 type; 542 543 /** @num_bytes: number of bytes in requested mask */ 544 __u32 num_bytes; 545 546 /** @mask: little-endian mask of @num_bytes */ 547 __u8 mask[]; 548 }; 549 550 /** 551 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps 552 * 553 * If a query is made with a struct drm_xe_device_query where .query is equal to 554 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles 555 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and 556 * .data points to this allocated structure. 557 * 558 * The query returns the engine cycles, which along with GT's @reference_clock, 559 * can be used to calculate the engine timestamp. In addition the 560 * query returns a set of cpu timestamps that indicate when the command 561 * streamer cycle count was captured. 562 */ 563 struct drm_xe_query_engine_cycles { 564 /** 565 * @eci: This is input by the user and is the engine for which command 566 * streamer cycles is queried. 567 */ 568 struct drm_xe_engine_class_instance eci; 569 570 /** 571 * @clockid: This is input by the user and is the reference clock id for 572 * CPU timestamp. For definition, see clock_gettime(2) and 573 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC, 574 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI. 575 */ 576 __s32 clockid; 577 578 /** @width: Width of the engine cycle counter in bits. */ 579 __u32 width; 580 581 /** 582 * @engine_cycles: Engine cycles as read from its register 583 * at 0x358 offset. 584 */ 585 __u64 engine_cycles; 586 587 /** 588 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before 589 * reading the engine_cycles register using the reference clockid set by the 590 * user. 591 */ 592 __u64 cpu_timestamp; 593 594 /** 595 * @cpu_delta: Time delta in ns captured around reading the lower dword 596 * of the engine_cycles register. 597 */ 598 __u64 cpu_delta; 599 }; 600 601 /** 602 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version 603 * 604 * Given a uc_type this will return the branch, major, minor and patch version 605 * of the micro-controller firmware. 606 */ 607 struct drm_xe_query_uc_fw_version { 608 /** @uc_type: The micro-controller type to query firmware version */ 609 #define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0 610 #define XE_QUERY_UC_TYPE_HUC 1 611 __u16 uc_type; 612 613 /** @pad: MBZ */ 614 __u16 pad; 615 616 /** @branch_ver: branch uc fw version */ 617 __u32 branch_ver; 618 /** @major_ver: major uc fw version */ 619 __u32 major_ver; 620 /** @minor_ver: minor uc fw version */ 621 __u32 minor_ver; 622 /** @patch_ver: patch uc fw version */ 623 __u32 patch_ver; 624 625 /** @pad2: MBZ */ 626 __u32 pad2; 627 628 /** @reserved: Reserved */ 629 __u64 reserved; 630 }; 631 632 /** 633 * struct drm_xe_query_pxp_status - query if PXP is ready 634 * 635 * If PXP is enabled and no fatal error has occurred, the status will be set to 636 * one of the following values: 637 * 0: PXP init still in progress 638 * 1: PXP init complete 639 * 640 * If PXP is not enabled or something has gone wrong, the query will be failed 641 * with one of the following error codes: 642 * -ENODEV: PXP not supported or disabled; 643 * -EIO: fatal error occurred during init, so PXP will never be enabled; 644 * -EINVAL: incorrect value provided as part of the query; 645 * -EFAULT: error copying the memory between kernel and userspace. 646 * 647 * The status can only be 0 in the first few seconds after driver load. If 648 * everything works as expected, the status will transition to init complete in 649 * less than 1 second, while in case of errors the driver might take longer to 650 * start returning an error code, but it should still take less than 10 seconds. 651 * 652 * The supported session type bitmask is based on the values in 653 * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore 654 * is not reported in the bitmask. 655 * 656 */ 657 struct drm_xe_query_pxp_status { 658 /** @status: current PXP status */ 659 __u32 status; 660 661 /** @supported_session_types: bitmask of supported PXP session types */ 662 __u32 supported_session_types; 663 }; 664 665 /** 666 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main 667 * structure to query device information 668 * 669 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_* 670 * and sets the value in the query member. This determines the type of 671 * the structure provided by the driver in data, among struct drm_xe_query_*. 672 * 673 * The @query can be: 674 * - %DRM_XE_DEVICE_QUERY_ENGINES 675 * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS 676 * - %DRM_XE_DEVICE_QUERY_CONFIG 677 * - %DRM_XE_DEVICE_QUERY_GT_LIST 678 * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware 679 * configuration of the device such as information on slices, memory, 680 * caches, and so on. It is provided as a table of key / value 681 * attributes. 682 * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 683 * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 684 * - %DRM_XE_DEVICE_QUERY_PXP_STATUS 685 * 686 * If size is set to 0, the driver fills it with the required size for 687 * the requested type of data to query. If size is equal to the required 688 * size, the queried information is copied into data. If size is set to 689 * a value different from 0 and different from the required size, the 690 * IOCTL call returns -EINVAL. 691 * 692 * For example the following code snippet allows retrieving and printing 693 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES: 694 * 695 * .. code-block:: C 696 * 697 * struct drm_xe_query_engines *engines; 698 * struct drm_xe_device_query query = { 699 * .extensions = 0, 700 * .query = DRM_XE_DEVICE_QUERY_ENGINES, 701 * .size = 0, 702 * .data = 0, 703 * }; 704 * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); 705 * engines = malloc(query.size); 706 * query.data = (uintptr_t)engines; 707 * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); 708 * for (int i = 0; i < engines->num_engines; i++) { 709 * printf("Engine %d: %s\n", i, 710 * engines->engines[i].instance.engine_class == 711 * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER": 712 * engines->engines[i].instance.engine_class == 713 * DRM_XE_ENGINE_CLASS_COPY ? "COPY": 714 * engines->engines[i].instance.engine_class == 715 * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE": 716 * engines->engines[i].instance.engine_class == 717 * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE": 718 * engines->engines[i].instance.engine_class == 719 * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE": 720 * "UNKNOWN"); 721 * } 722 * free(engines); 723 */ 724 struct drm_xe_device_query { 725 /** @extensions: Pointer to the first extension struct, if any */ 726 __u64 extensions; 727 728 #define DRM_XE_DEVICE_QUERY_ENGINES 0 729 #define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1 730 #define DRM_XE_DEVICE_QUERY_CONFIG 2 731 #define DRM_XE_DEVICE_QUERY_GT_LIST 3 732 #define DRM_XE_DEVICE_QUERY_HWCONFIG 4 733 #define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5 734 #define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6 735 #define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7 736 #define DRM_XE_DEVICE_QUERY_OA_UNITS 8 737 #define DRM_XE_DEVICE_QUERY_PXP_STATUS 9 738 /** @query: The type of data to query */ 739 __u32 query; 740 741 /** @size: Size of the queried data */ 742 __u32 size; 743 744 /** @data: Queried data is placed here */ 745 __u64 data; 746 747 /** @reserved: Reserved */ 748 __u64 reserved[2]; 749 }; 750 751 /** 752 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for 753 * gem creation 754 * 755 * The @flags can be: 756 * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING 757 * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT 758 * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a 759 * possible placement, ensure that the corresponding VRAM allocation 760 * will always use the CPU accessible part of VRAM. This is important 761 * for small-bar systems (on full-bar systems this gets turned into a 762 * noop). 763 * Note1: System memory can be used as an extra placement if the kernel 764 * should spill the allocation to system memory, if space can't be made 765 * available in the CPU accessible part of VRAM (giving the same 766 * behaviour as the i915 interface, see 767 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS). 768 * Note2: For clear-color CCS surfaces the kernel needs to read the 769 * clear-color value stored in the buffer, and on discrete platforms we 770 * need to use VRAM for display surfaces, therefore the kernel requires 771 * setting this flag for such objects, otherwise an error is thrown on 772 * small-bar systems. 773 * 774 * @cpu_caching supports the following values: 775 * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back 776 * caching. On iGPU this can't be used for scanout surfaces. Currently 777 * not allowed for objects placed in VRAM. 778 * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This 779 * is uncached. Scanout surfaces should likely use this. All objects 780 * that can be placed in VRAM must use this. 781 * 782 * This ioctl supports setting the following properties via the 783 * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the 784 * generic @drm_xe_ext_set_property struct: 785 * 786 * - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session 787 * this object will be used with. Valid values are listed in enum 788 * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so 789 * there is no need to explicitly set that. Objects used with session of type 790 * %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation 791 * event occurs after their creation. Attempting to flip an invalid object 792 * will cause a black frame to be displayed instead. Submissions with invalid 793 * objects mapped in the VM will be rejected. 794 */ 795 struct drm_xe_gem_create { 796 #define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY 0 797 #define DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE 0 798 /** @extensions: Pointer to the first extension struct, if any */ 799 __u64 extensions; 800 801 /** 802 * @size: Size of the object to be created, must match region 803 * (system or vram) minimum alignment (&min_page_size). 804 */ 805 __u64 size; 806 807 /** 808 * @placement: A mask of memory instances of where BO can be placed. 809 * Each index in this mask refers directly to the struct 810 * drm_xe_query_mem_regions' instance, no assumptions should 811 * be made about order. The type of each region is described 812 * by struct drm_xe_query_mem_regions' mem_class. 813 */ 814 __u32 placement; 815 816 #define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0) 817 #define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1) 818 #define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2) 819 /** 820 * @flags: Flags, currently a mask of memory instances of where BO can 821 * be placed 822 */ 823 __u32 flags; 824 825 /** 826 * @vm_id: Attached VM, if any 827 * 828 * If a VM is specified, this BO must: 829 * 830 * 1. Only ever be bound to that VM. 831 * 2. Cannot be exported as a PRIME fd. 832 */ 833 __u32 vm_id; 834 835 /** 836 * @handle: Returned handle for the object. 837 * 838 * Object handles are nonzero. 839 */ 840 __u32 handle; 841 842 #define DRM_XE_GEM_CPU_CACHING_WB 1 843 #define DRM_XE_GEM_CPU_CACHING_WC 2 844 /** 845 * @cpu_caching: The CPU caching mode to select for this object. If 846 * mmaping the object the mode selected here will also be used. The 847 * exception is when mapping system memory (including data evicted 848 * to system) on discrete GPUs. The caching mode selected will 849 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency 850 * between GPU- and CPU is guaranteed. The caching mode of 851 * existing CPU-mappings will be updated transparently to 852 * user-space clients. 853 */ 854 __u16 cpu_caching; 855 /** @pad: MBZ */ 856 __u16 pad[3]; 857 858 /** @reserved: Reserved */ 859 __u64 reserved[2]; 860 }; 861 862 /** 863 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET 864 * 865 * The @flags can be: 866 * - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset 867 * for use in mmap ioctl. Writing to the returned mmap address will generate a 868 * PCI memory barrier with low overhead (avoiding IOCTL call as well as writing 869 * to VRAM which would also add overhead), acting like an MI_MEM_FENCE 870 * instruction. 871 * 872 * Note: The mmap size can be at most 4K, due to HW limitations. As a result 873 * this interface is only supported on CPU architectures that support 4K page 874 * size. The mmap_offset ioctl will detect this and gracefully return an 875 * error, where userspace is expected to have a different fallback method for 876 * triggering a barrier. 877 * 878 * Roughly the usage would be as follows: 879 * 880 * .. code-block:: C 881 * 882 * struct drm_xe_gem_mmap_offset mmo = { 883 * .handle = 0, // must be set to 0 884 * .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER, 885 * }; 886 * 887 * err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo); 888 * map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset); 889 * map[i] = 0xdeadbeaf; // issue barrier 890 */ 891 struct drm_xe_gem_mmap_offset { 892 /** @extensions: Pointer to the first extension struct, if any */ 893 __u64 extensions; 894 895 /** @handle: Handle for the object being mapped. */ 896 __u32 handle; 897 898 #define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER (1 << 0) 899 /** @flags: Flags */ 900 __u32 flags; 901 902 /** @offset: The fake offset to use for subsequent mmap call */ 903 __u64 offset; 904 905 /** @reserved: Reserved */ 906 __u64 reserved[2]; 907 }; 908 909 /** 910 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE 911 * 912 * The @flags can be: 913 * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE 914 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts 915 * exec submissions to its exec_queues that don't have an upper time 916 * limit on the job execution time. But exec submissions to these 917 * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ, 918 * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF, 919 * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL. 920 * LR VMs can be created in recoverable page-fault mode using 921 * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it. 922 * If that flag is omitted, the UMD can not rely on the slightly 923 * different per-VM overcommit semantics that are enabled by 924 * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may 925 * still enable recoverable pagefaults if supported by the device. 926 * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also 927 * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on 928 * demand when accessed, and also allows per-VM overcommit of memory. 929 * The xe driver internally uses recoverable pagefaults to implement 930 * this. 931 */ 932 struct drm_xe_vm_create { 933 /** @extensions: Pointer to the first extension struct, if any */ 934 __u64 extensions; 935 936 #define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0) 937 #define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1) 938 #define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2) 939 /** @flags: Flags */ 940 __u32 flags; 941 942 /** @vm_id: Returned VM ID */ 943 __u32 vm_id; 944 945 /** @reserved: Reserved */ 946 __u64 reserved[2]; 947 }; 948 949 /** 950 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY 951 */ 952 struct drm_xe_vm_destroy { 953 /** @vm_id: VM ID */ 954 __u32 vm_id; 955 956 /** @pad: MBZ */ 957 __u32 pad; 958 959 /** @reserved: Reserved */ 960 __u64 reserved[2]; 961 }; 962 963 /** 964 * struct drm_xe_vm_bind_op - run bind operations 965 * 966 * The @op can be: 967 * - %DRM_XE_VM_BIND_OP_MAP 968 * - %DRM_XE_VM_BIND_OP_UNMAP 969 * - %DRM_XE_VM_BIND_OP_MAP_USERPTR 970 * - %DRM_XE_VM_BIND_OP_UNMAP_ALL 971 * - %DRM_XE_VM_BIND_OP_PREFETCH 972 * 973 * and the @flags can be: 974 * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only 975 * to ensure write protection 976 * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the 977 * MAP operation immediately rather than deferring the MAP to the page 978 * fault handler. This is implied on a non-faulting VM as there is no 979 * fault handler to defer to. 980 * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page 981 * tables are setup with a special bit which indicates writes are 982 * dropped and all reads return zero. In the future, the NULL flags 983 * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO 984 * handle MBZ, and the BO offset MBZ. This flag is intended to 985 * implement VK sparse bindings. 986 * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP, 987 * reject the binding if the encryption key is no longer valid. This 988 * flag has no effect on BOs that are not marked as using PXP. 989 */ 990 struct drm_xe_vm_bind_op { 991 /** @extensions: Pointer to the first extension struct, if any */ 992 __u64 extensions; 993 994 /** 995 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP 996 */ 997 __u32 obj; 998 999 /** 1000 * @pat_index: The platform defined @pat_index to use for this mapping. 1001 * The index basically maps to some predefined memory attributes, 1002 * including things like caching, coherency, compression etc. The exact 1003 * meaning of the pat_index is platform specific and defined in the 1004 * Bspec and PRMs. When the KMD sets up the binding the index here is 1005 * encoded into the ppGTT PTE. 1006 * 1007 * For coherency the @pat_index needs to be at least 1way coherent when 1008 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD 1009 * will extract the coherency mode from the @pat_index and reject if 1010 * there is a mismatch (see note below for pre-MTL platforms). 1011 * 1012 * Note: On pre-MTL platforms there is only a caching mode and no 1013 * explicit coherency mode, but on such hardware there is always a 1014 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with 1015 * CPU caches even with the caching mode set as uncached. It's only the 1016 * display engine that is incoherent (on dgpu it must be in VRAM which 1017 * is always mapped as WC on the CPU). However to keep the uapi somewhat 1018 * consistent with newer platforms the KMD groups the different cache 1019 * levels into the following coherency buckets on all pre-MTL platforms: 1020 * 1021 * ppGTT UC -> COH_NONE 1022 * ppGTT WC -> COH_NONE 1023 * ppGTT WT -> COH_NONE 1024 * ppGTT WB -> COH_AT_LEAST_1WAY 1025 * 1026 * In practice UC/WC/WT should only ever used for scanout surfaces on 1027 * such platforms (or perhaps in general for dma-buf if shared with 1028 * another device) since it is only the display engine that is actually 1029 * incoherent. Everything else should typically use WB given that we 1030 * have a shared-LLC. On MTL+ this completely changes and the HW 1031 * defines the coherency mode as part of the @pat_index, where 1032 * incoherent GT access is possible. 1033 * 1034 * Note: For userptr and externally imported dma-buf the kernel expects 1035 * either 1WAY or 2WAY for the @pat_index. 1036 * 1037 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions 1038 * on the @pat_index. For such mappings there is no actual memory being 1039 * mapped (the address in the PTE is invalid), so the various PAT memory 1040 * attributes likely do not apply. Simply leaving as zero is one 1041 * option (still a valid pat_index). 1042 */ 1043 __u16 pat_index; 1044 1045 /** @pad: MBZ */ 1046 __u16 pad; 1047 1048 union { 1049 /** 1050 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE, 1051 * ignored for unbind 1052 */ 1053 __u64 obj_offset; 1054 1055 /** @userptr: user pointer to bind on */ 1056 __u64 userptr; 1057 }; 1058 1059 /** 1060 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL 1061 */ 1062 __u64 range; 1063 1064 /** @addr: Address to operate on, MBZ for UNMAP_ALL */ 1065 __u64 addr; 1066 1067 #define DRM_XE_VM_BIND_OP_MAP 0x0 1068 #define DRM_XE_VM_BIND_OP_UNMAP 0x1 1069 #define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2 1070 #define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3 1071 #define DRM_XE_VM_BIND_OP_PREFETCH 0x4 1072 /** @op: Bind operation to perform */ 1073 __u32 op; 1074 1075 #define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0) 1076 #define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1) 1077 #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2) 1078 #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) 1079 #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4) 1080 /** @flags: Bind flags */ 1081 __u32 flags; 1082 1083 /** 1084 * @prefetch_mem_region_instance: Memory region to prefetch VMA to. 1085 * It is a region instance, not a mask. 1086 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation. 1087 */ 1088 __u32 prefetch_mem_region_instance; 1089 1090 /** @pad2: MBZ */ 1091 __u32 pad2; 1092 1093 /** @reserved: Reserved */ 1094 __u64 reserved[3]; 1095 }; 1096 1097 /** 1098 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND 1099 * 1100 * Below is an example of a minimal use of @drm_xe_vm_bind to 1101 * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to 1102 * illustrate `userptr`. It can be synchronized by using the example 1103 * provided for @drm_xe_sync. 1104 * 1105 * .. code-block:: C 1106 * 1107 * data = aligned_alloc(ALIGNMENT, BO_SIZE); 1108 * struct drm_xe_vm_bind bind = { 1109 * .vm_id = vm, 1110 * .num_binds = 1, 1111 * .bind.obj = 0, 1112 * .bind.obj_offset = to_user_pointer(data), 1113 * .bind.range = BO_SIZE, 1114 * .bind.addr = BIND_ADDRESS, 1115 * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR, 1116 * .bind.flags = 0, 1117 * .num_syncs = 1, 1118 * .syncs = &sync, 1119 * .exec_queue_id = 0, 1120 * }; 1121 * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind); 1122 * 1123 */ 1124 struct drm_xe_vm_bind { 1125 /** @extensions: Pointer to the first extension struct, if any */ 1126 __u64 extensions; 1127 1128 /** @vm_id: The ID of the VM to bind to */ 1129 __u32 vm_id; 1130 1131 /** 1132 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND 1133 * and exec queue must have same vm_id. If zero, the default VM bind engine 1134 * is used. 1135 */ 1136 __u32 exec_queue_id; 1137 1138 /** @pad: MBZ */ 1139 __u32 pad; 1140 1141 /** @num_binds: number of binds in this IOCTL */ 1142 __u32 num_binds; 1143 1144 union { 1145 /** @bind: used if num_binds == 1 */ 1146 struct drm_xe_vm_bind_op bind; 1147 1148 /** 1149 * @vector_of_binds: userptr to array of struct 1150 * drm_xe_vm_bind_op if num_binds > 1 1151 */ 1152 __u64 vector_of_binds; 1153 }; 1154 1155 /** @pad2: MBZ */ 1156 __u32 pad2; 1157 1158 /** @num_syncs: amount of syncs to wait on */ 1159 __u32 num_syncs; 1160 1161 /** @syncs: pointer to struct drm_xe_sync array */ 1162 __u64 syncs; 1163 1164 /** @reserved: Reserved */ 1165 __u64 reserved[2]; 1166 }; 1167 1168 /** 1169 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE 1170 * 1171 * This ioctl supports setting the following properties via the 1172 * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the 1173 * generic @drm_xe_ext_set_property struct: 1174 * 1175 * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority. 1176 * CAP_SYS_NICE is required to set a value above normal. 1177 * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice 1178 * duration in microseconds. 1179 * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session 1180 * this queue will be used with. Valid values are listed in enum 1181 * drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so 1182 * there is no need to explicitly set that. When a queue of type 1183 * %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session 1184 * (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if isn't already running. 1185 * Given that going into a power-saving state kills PXP HWDRM sessions, 1186 * runtime PM will be blocked while queues of this type are alive. 1187 * All PXP queues will be killed if a PXP invalidation event occurs. 1188 * 1189 * The example below shows how to use @drm_xe_exec_queue_create to create 1190 * a simple exec_queue (no parallel submission) of class 1191 * &DRM_XE_ENGINE_CLASS_RENDER. 1192 * 1193 * .. code-block:: C 1194 * 1195 * struct drm_xe_engine_class_instance instance = { 1196 * .engine_class = DRM_XE_ENGINE_CLASS_RENDER, 1197 * }; 1198 * struct drm_xe_exec_queue_create exec_queue_create = { 1199 * .extensions = 0, 1200 * .vm_id = vm, 1201 * .num_bb_per_exec = 1, 1202 * .num_eng_per_bb = 1, 1203 * .instances = to_user_pointer(&instance), 1204 * }; 1205 * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create); 1206 * 1207 */ 1208 struct drm_xe_exec_queue_create { 1209 #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 1210 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 1211 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 1212 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2 1213 /** @extensions: Pointer to the first extension struct, if any */ 1214 __u64 extensions; 1215 1216 /** @width: submission width (number BB per exec) for this exec queue */ 1217 __u16 width; 1218 1219 /** @num_placements: number of valid placements for this exec queue */ 1220 __u16 num_placements; 1221 1222 /** @vm_id: VM to use for this exec queue */ 1223 __u32 vm_id; 1224 1225 /** @flags: MBZ */ 1226 __u32 flags; 1227 1228 /** @exec_queue_id: Returned exec queue ID */ 1229 __u32 exec_queue_id; 1230 1231 /** 1232 * @instances: user pointer to a 2-d array of struct 1233 * drm_xe_engine_class_instance 1234 * 1235 * length = width (i) * num_placements (j) 1236 * index = j + i * width 1237 */ 1238 __u64 instances; 1239 1240 /** @reserved: Reserved */ 1241 __u64 reserved[2]; 1242 }; 1243 1244 /** 1245 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY 1246 */ 1247 struct drm_xe_exec_queue_destroy { 1248 /** @exec_queue_id: Exec queue ID */ 1249 __u32 exec_queue_id; 1250 1251 /** @pad: MBZ */ 1252 __u32 pad; 1253 1254 /** @reserved: Reserved */ 1255 __u64 reserved[2]; 1256 }; 1257 1258 /** 1259 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY 1260 * 1261 * The @property can be: 1262 * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 1263 */ 1264 struct drm_xe_exec_queue_get_property { 1265 /** @extensions: Pointer to the first extension struct, if any */ 1266 __u64 extensions; 1267 1268 /** @exec_queue_id: Exec queue ID */ 1269 __u32 exec_queue_id; 1270 1271 #define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 1272 /** @property: property to get */ 1273 __u32 property; 1274 1275 /** @value: property value */ 1276 __u64 value; 1277 1278 /** @reserved: Reserved */ 1279 __u64 reserved[2]; 1280 }; 1281 1282 /** 1283 * struct drm_xe_sync - sync object 1284 * 1285 * The @type can be: 1286 * - %DRM_XE_SYNC_TYPE_SYNCOBJ 1287 * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 1288 * - %DRM_XE_SYNC_TYPE_USER_FENCE 1289 * 1290 * and the @flags can be: 1291 * - %DRM_XE_SYNC_FLAG_SIGNAL 1292 * 1293 * A minimal use of @drm_xe_sync looks like this: 1294 * 1295 * .. code-block:: C 1296 * 1297 * struct drm_xe_sync sync = { 1298 * .flags = DRM_XE_SYNC_FLAG_SIGNAL, 1299 * .type = DRM_XE_SYNC_TYPE_SYNCOBJ, 1300 * }; 1301 * struct drm_syncobj_create syncobj_create = { 0 }; 1302 * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create); 1303 * sync.handle = syncobj_create.handle; 1304 * ... 1305 * use of &sync in drm_xe_exec or drm_xe_vm_bind 1306 * ... 1307 * struct drm_syncobj_wait wait = { 1308 * .handles = &sync.handle, 1309 * .timeout_nsec = INT64_MAX, 1310 * .count_handles = 1, 1311 * .flags = 0, 1312 * .first_signaled = 0, 1313 * .pad = 0, 1314 * }; 1315 * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait); 1316 */ 1317 struct drm_xe_sync { 1318 /** @extensions: Pointer to the first extension struct, if any */ 1319 __u64 extensions; 1320 1321 #define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0 1322 #define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1 1323 #define DRM_XE_SYNC_TYPE_USER_FENCE 0x2 1324 /** @type: Type of the this sync object */ 1325 __u32 type; 1326 1327 #define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0) 1328 /** @flags: Sync Flags */ 1329 __u32 flags; 1330 1331 union { 1332 /** @handle: Handle for the object */ 1333 __u32 handle; 1334 1335 /** 1336 * @addr: Address of user fence. When sync is passed in via exec 1337 * IOCTL this is a GPU address in the VM. When sync passed in via 1338 * VM bind IOCTL this is a user pointer. In either case, it is 1339 * the users responsibility that this address is present and 1340 * mapped when the user fence is signalled. Must be qword 1341 * aligned. 1342 */ 1343 __u64 addr; 1344 }; 1345 1346 /** 1347 * @timeline_value: Input for the timeline sync object. Needs to be 1348 * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ. 1349 */ 1350 __u64 timeline_value; 1351 1352 /** @reserved: Reserved */ 1353 __u64 reserved[2]; 1354 }; 1355 1356 /** 1357 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC 1358 * 1359 * This is an example to use @drm_xe_exec for execution of the object 1360 * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue 1361 * (see example in @drm_xe_exec_queue_create). It can be synchronized 1362 * by using the example provided for @drm_xe_sync. 1363 * 1364 * .. code-block:: C 1365 * 1366 * struct drm_xe_exec exec = { 1367 * .exec_queue_id = exec_queue, 1368 * .syncs = &sync, 1369 * .num_syncs = 1, 1370 * .address = BIND_ADDRESS, 1371 * .num_batch_buffer = 1, 1372 * }; 1373 * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec); 1374 * 1375 */ 1376 struct drm_xe_exec { 1377 /** @extensions: Pointer to the first extension struct, if any */ 1378 __u64 extensions; 1379 1380 /** @exec_queue_id: Exec queue ID for the batch buffer */ 1381 __u32 exec_queue_id; 1382 1383 /** @num_syncs: Amount of struct drm_xe_sync in array. */ 1384 __u32 num_syncs; 1385 1386 /** @syncs: Pointer to struct drm_xe_sync array. */ 1387 __u64 syncs; 1388 1389 /** 1390 * @address: address of batch buffer if num_batch_buffer == 1 or an 1391 * array of batch buffer addresses 1392 */ 1393 __u64 address; 1394 1395 /** 1396 * @num_batch_buffer: number of batch buffer in this exec, must match 1397 * the width of the engine 1398 */ 1399 __u16 num_batch_buffer; 1400 1401 /** @pad: MBZ */ 1402 __u16 pad[3]; 1403 1404 /** @reserved: Reserved */ 1405 __u64 reserved[2]; 1406 }; 1407 1408 /** 1409 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE 1410 * 1411 * Wait on user fence, XE will wake-up on every HW engine interrupt in the 1412 * instances list and check if user fence is complete:: 1413 * 1414 * (*addr & MASK) OP (VALUE & MASK) 1415 * 1416 * Returns to user on user fence completion or timeout. 1417 * 1418 * The @op can be: 1419 * - %DRM_XE_UFENCE_WAIT_OP_EQ 1420 * - %DRM_XE_UFENCE_WAIT_OP_NEQ 1421 * - %DRM_XE_UFENCE_WAIT_OP_GT 1422 * - %DRM_XE_UFENCE_WAIT_OP_GTE 1423 * - %DRM_XE_UFENCE_WAIT_OP_LT 1424 * - %DRM_XE_UFENCE_WAIT_OP_LTE 1425 * 1426 * and the @flags can be: 1427 * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME 1428 * - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP 1429 * 1430 * The @mask values can be for example: 1431 * - 0xffu for u8 1432 * - 0xffffu for u16 1433 * - 0xffffffffu for u32 1434 * - 0xffffffffffffffffu for u64 1435 */ 1436 struct drm_xe_wait_user_fence { 1437 /** @extensions: Pointer to the first extension struct, if any */ 1438 __u64 extensions; 1439 1440 /** 1441 * @addr: user pointer address to wait on, must qword aligned 1442 */ 1443 __u64 addr; 1444 1445 #define DRM_XE_UFENCE_WAIT_OP_EQ 0x0 1446 #define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1 1447 #define DRM_XE_UFENCE_WAIT_OP_GT 0x2 1448 #define DRM_XE_UFENCE_WAIT_OP_GTE 0x3 1449 #define DRM_XE_UFENCE_WAIT_OP_LT 0x4 1450 #define DRM_XE_UFENCE_WAIT_OP_LTE 0x5 1451 /** @op: wait operation (type of comparison) */ 1452 __u16 op; 1453 1454 #define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0) 1455 /** @flags: wait flags */ 1456 __u16 flags; 1457 1458 /** @pad: MBZ */ 1459 __u32 pad; 1460 1461 /** @value: compare value */ 1462 __u64 value; 1463 1464 /** @mask: comparison mask */ 1465 __u64 mask; 1466 1467 /** 1468 * @timeout: how long to wait before bailing, value in nanoseconds. 1469 * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout) 1470 * it contains timeout expressed in nanoseconds to wait (fence will 1471 * expire at now() + timeout). 1472 * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait 1473 * will end at timeout (uses system MONOTONIC_CLOCK). 1474 * Passing negative timeout leads to neverending wait. 1475 * 1476 * On relative timeout this value is updated with timeout left 1477 * (for restarting the call in case of signal delivery). 1478 * On absolute timeout this value stays intact (restarted call still 1479 * expire at the same point of time). 1480 */ 1481 __s64 timeout; 1482 1483 /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */ 1484 __u32 exec_queue_id; 1485 1486 /** @pad2: MBZ */ 1487 __u32 pad2; 1488 1489 /** @reserved: Reserved */ 1490 __u64 reserved[2]; 1491 }; 1492 1493 /** 1494 * enum drm_xe_observation_type - Observation stream types 1495 */ 1496 enum drm_xe_observation_type { 1497 /** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */ 1498 DRM_XE_OBSERVATION_TYPE_OA, 1499 }; 1500 1501 /** 1502 * enum drm_xe_observation_op - Observation stream ops 1503 */ 1504 enum drm_xe_observation_op { 1505 /** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */ 1506 DRM_XE_OBSERVATION_OP_STREAM_OPEN, 1507 1508 /** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */ 1509 DRM_XE_OBSERVATION_OP_ADD_CONFIG, 1510 1511 /** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */ 1512 DRM_XE_OBSERVATION_OP_REMOVE_CONFIG, 1513 }; 1514 1515 /** 1516 * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION 1517 * 1518 * The observation layer enables multiplexing observation streams of 1519 * multiple types. The actual params for a particular stream operation are 1520 * supplied via the @param pointer (use __copy_from_user to get these 1521 * params). 1522 */ 1523 struct drm_xe_observation_param { 1524 /** @extensions: Pointer to the first extension struct, if any */ 1525 __u64 extensions; 1526 /** @observation_type: observation stream type, of enum @drm_xe_observation_type */ 1527 __u64 observation_type; 1528 /** @observation_op: observation stream op, of enum @drm_xe_observation_op */ 1529 __u64 observation_op; 1530 /** @param: Pointer to actual stream params */ 1531 __u64 param; 1532 }; 1533 1534 /** 1535 * enum drm_xe_observation_ioctls - Observation stream fd ioctl's 1536 * 1537 * Information exchanged between userspace and kernel for observation fd 1538 * ioctl's is stream type specific 1539 */ 1540 enum drm_xe_observation_ioctls { 1541 /** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */ 1542 DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0), 1543 1544 /** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for a observation stream */ 1545 DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1), 1546 1547 /** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */ 1548 DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2), 1549 1550 /** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */ 1551 DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3), 1552 1553 /** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */ 1554 DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4), 1555 }; 1556 1557 /** 1558 * enum drm_xe_oa_unit_type - OA unit types 1559 */ 1560 enum drm_xe_oa_unit_type { 1561 /** 1562 * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered 1563 * sub-types of OAG. For OAR/OAC, use OAG. 1564 */ 1565 DRM_XE_OA_UNIT_TYPE_OAG, 1566 1567 /** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */ 1568 DRM_XE_OA_UNIT_TYPE_OAM, 1569 }; 1570 1571 /** 1572 * struct drm_xe_oa_unit - describe OA unit 1573 */ 1574 struct drm_xe_oa_unit { 1575 /** @extensions: Pointer to the first extension struct, if any */ 1576 __u64 extensions; 1577 1578 /** @oa_unit_id: OA unit ID */ 1579 __u32 oa_unit_id; 1580 1581 /** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */ 1582 __u32 oa_unit_type; 1583 1584 /** @capabilities: OA capabilities bit-mask */ 1585 __u64 capabilities; 1586 #define DRM_XE_OA_CAPS_BASE (1 << 0) 1587 #define DRM_XE_OA_CAPS_SYNCS (1 << 1) 1588 #define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2) 1589 #define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3) 1590 1591 /** @oa_timestamp_freq: OA timestamp freq */ 1592 __u64 oa_timestamp_freq; 1593 1594 /** @reserved: MBZ */ 1595 __u64 reserved[4]; 1596 1597 /** @num_engines: number of engines in @eci array */ 1598 __u64 num_engines; 1599 1600 /** @eci: engines attached to this OA unit */ 1601 struct drm_xe_engine_class_instance eci[]; 1602 }; 1603 1604 /** 1605 * struct drm_xe_query_oa_units - describe OA units 1606 * 1607 * If a query is made with a struct drm_xe_device_query where .query 1608 * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct 1609 * drm_xe_query_oa_units in .data. 1610 * 1611 * OA unit properties for all OA units can be accessed using a code block 1612 * such as the one below: 1613 * 1614 * .. code-block:: C 1615 * 1616 * struct drm_xe_query_oa_units *qoa; 1617 * struct drm_xe_oa_unit *oau; 1618 * u8 *poau; 1619 * 1620 * // malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then: 1621 * poau = (u8 *)&qoa->oa_units[0]; 1622 * for (int i = 0; i < qoa->num_oa_units; i++) { 1623 * oau = (struct drm_xe_oa_unit *)poau; 1624 * // Access 'struct drm_xe_oa_unit' fields here 1625 * poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]); 1626 * } 1627 */ 1628 struct drm_xe_query_oa_units { 1629 /** @extensions: Pointer to the first extension struct, if any */ 1630 __u64 extensions; 1631 /** @num_oa_units: number of OA units returned in oau[] */ 1632 __u32 num_oa_units; 1633 /** @pad: MBZ */ 1634 __u32 pad; 1635 /** 1636 * @oa_units: struct @drm_xe_oa_unit array returned for this device. 1637 * Written below as a u64 array to avoid problems with nested flexible 1638 * arrays with some compilers 1639 */ 1640 __u64 oa_units[]; 1641 }; 1642 1643 /** 1644 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec 1645 * 52198/60942 1646 */ 1647 enum drm_xe_oa_format_type { 1648 /** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */ 1649 DRM_XE_OA_FMT_TYPE_OAG, 1650 /** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */ 1651 DRM_XE_OA_FMT_TYPE_OAR, 1652 /** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */ 1653 DRM_XE_OA_FMT_TYPE_OAM, 1654 /** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */ 1655 DRM_XE_OA_FMT_TYPE_OAC, 1656 /** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */ 1657 DRM_XE_OA_FMT_TYPE_OAM_MPEC, 1658 /** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */ 1659 DRM_XE_OA_FMT_TYPE_PEC, 1660 }; 1661 1662 /** 1663 * enum drm_xe_oa_property_id - OA stream property id's 1664 * 1665 * Stream params are specified as a chain of @drm_xe_ext_set_property 1666 * struct's, with @property values from enum @drm_xe_oa_property_id and 1667 * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY. 1668 * @param field in struct @drm_xe_observation_param points to the first 1669 * @drm_xe_ext_set_property struct. 1670 * 1671 * Exactly the same mechanism is also used for stream reconfiguration using the 1672 * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a 1673 * subset of properties below can be specified for stream reconfiguration. 1674 */ 1675 enum drm_xe_oa_property_id { 1676 #define DRM_XE_OA_EXTENSION_SET_PROPERTY 0 1677 /** 1678 * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open 1679 * the OA stream, see @oa_unit_id in 'struct 1680 * drm_xe_query_oa_units'. Defaults to 0 if not provided. 1681 */ 1682 DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1, 1683 1684 /** 1685 * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw 1686 * OA unit reports or stream samples in a global buffer attached to an 1687 * OA unit. 1688 */ 1689 DRM_XE_OA_PROPERTY_SAMPLE_OA, 1690 1691 /** 1692 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA 1693 * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG. 1694 */ 1695 DRM_XE_OA_PROPERTY_OA_METRIC_SET, 1696 1697 /** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */ 1698 DRM_XE_OA_PROPERTY_OA_FORMAT, 1699 /* 1700 * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942, 1701 * in terms of the following quantities: a. enum @drm_xe_oa_format_type 1702 * b. Counter select c. Counter size and d. BC report. Also refer to the 1703 * oa_formats array in drivers/gpu/drm/xe/xe_oa.c. 1704 */ 1705 #define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0) 1706 #define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8) 1707 #define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16) 1708 #define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24) 1709 1710 /** 1711 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit 1712 * sampling with sampling frequency proportional to 2^(period_exponent + 1) 1713 */ 1714 DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT, 1715 1716 /** 1717 * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA 1718 * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE). 1719 */ 1720 DRM_XE_OA_PROPERTY_OA_DISABLED, 1721 1722 /** 1723 * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific 1724 * @exec_queue_id. OA queries can be executed on this exec queue. 1725 */ 1726 DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID, 1727 1728 /** 1729 * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to 1730 * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0. 1731 */ 1732 DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE, 1733 1734 /** 1735 * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing 1736 * to be disabled for the stream exec queue. 1737 */ 1738 DRM_XE_OA_PROPERTY_NO_PREEMPT, 1739 1740 /** 1741 * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array 1742 * specified in @DRM_XE_OA_PROPERTY_SYNCS 1743 */ 1744 DRM_XE_OA_PROPERTY_NUM_SYNCS, 1745 1746 /** 1747 * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array 1748 * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA 1749 * configuration will wait till input fences signal. Output fences 1750 * will signal after the new OA configuration takes effect. For 1751 * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar 1752 * to the VM bind case. 1753 */ 1754 DRM_XE_OA_PROPERTY_SYNCS, 1755 1756 /** 1757 * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be 1758 * allocated by the driver in bytes. Supported sizes are powers of 1759 * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA 1760 * buffer is allocated by default. 1761 */ 1762 DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE, 1763 1764 /** 1765 * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait 1766 * for before unblocking poll or read 1767 */ 1768 DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS, 1769 }; 1770 1771 /** 1772 * struct drm_xe_oa_config - OA metric configuration 1773 * 1774 * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A 1775 * particular config can be specified when opening an OA stream using 1776 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property. 1777 */ 1778 struct drm_xe_oa_config { 1779 /** @extensions: Pointer to the first extension struct, if any */ 1780 __u64 extensions; 1781 1782 /** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */ 1783 char uuid[36]; 1784 1785 /** @n_regs: Number of regs in @regs_ptr */ 1786 __u32 n_regs; 1787 1788 /** 1789 * @regs_ptr: Pointer to (register address, value) pairs for OA config 1790 * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs). 1791 */ 1792 __u64 regs_ptr; 1793 }; 1794 1795 /** 1796 * struct drm_xe_oa_stream_status - OA stream status returned from 1797 * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can 1798 * call the ioctl to query stream status in response to EIO errno from 1799 * observation fd read(). 1800 */ 1801 struct drm_xe_oa_stream_status { 1802 /** @extensions: Pointer to the first extension struct, if any */ 1803 __u64 extensions; 1804 1805 /** @oa_status: OA stream status (see Bspec 46717/61226) */ 1806 __u64 oa_status; 1807 #define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL (1 << 3) 1808 #define DRM_XE_OASTATUS_COUNTER_OVERFLOW (1 << 2) 1809 #define DRM_XE_OASTATUS_BUFFER_OVERFLOW (1 << 1) 1810 #define DRM_XE_OASTATUS_REPORT_LOST (1 << 0) 1811 1812 /** @reserved: reserved for future use */ 1813 __u64 reserved[3]; 1814 }; 1815 1816 /** 1817 * struct drm_xe_oa_stream_info - OA stream info returned from 1818 * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl 1819 */ 1820 struct drm_xe_oa_stream_info { 1821 /** @extensions: Pointer to the first extension struct, if any */ 1822 __u64 extensions; 1823 1824 /** @oa_buf_size: OA buffer size */ 1825 __u64 oa_buf_size; 1826 1827 /** @reserved: reserved for future use */ 1828 __u64 reserved[3]; 1829 }; 1830 1831 /** 1832 * enum drm_xe_pxp_session_type - Supported PXP session types. 1833 * 1834 * We currently only support HWDRM sessions, which are used for protected 1835 * content that ends up being displayed, but the HW supports multiple types, so 1836 * we might extend support in the future. 1837 */ 1838 enum drm_xe_pxp_session_type { 1839 /** @DRM_XE_PXP_TYPE_NONE: PXP not used */ 1840 DRM_XE_PXP_TYPE_NONE = 0, 1841 /** 1842 * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends 1843 * up on the display. 1844 */ 1845 DRM_XE_PXP_TYPE_HWDRM = 1, 1846 }; 1847 1848 /* ID of the protected content session managed by Xe when PXP is active */ 1849 #define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf 1850 1851 #if defined(__cplusplus) 1852 } 1853 #endif 1854 1855 #endif /* _UAPI_XE_DRM_H_ */ 1856