/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 * Sections in this file are organized as follows:
 *   1. IOCTL definition
 *   2. Extension definition and helper structs
 *   3. IOCTL's Query structs in the order of the Query's entries.
 *   4. The rest of IOCTL structs in the order of IOCTL declaration.
 */

/**
 * DOC: Xe Device Block Diagram
 *
 * The diagram below represents a high-level simplification of a discrete
 * GPU supported by the Xe driver. It shows some device components which
 * are necessary to understand this API, as well as how they relate to
 * each other. This diagram does not represent real hardware::
 *
 *   ┌──────────────────────────────────────────────────────────────────┐
 *   │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
 *   │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
 *   │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
 *   │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
 *   │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
 *   │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
 *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 *   │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
 *   │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 *   │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
 *   │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
 *   │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
 *   │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
 *   └─────────────────────────────Device0───────┬──────────────────────┘
 *                                               │
 *                        ───────────────────────┴────────── PCI bus
 */
/**
 * DOC: Xe uAPI Overview
 *
 * This section aims to describe the Xe IOCTL entries, their structs, and
 * other Xe-related uAPI such as uevents and PMU (Platform Monitoring Unit)
 * related entries and usage.
 *
 * List of supported IOCTLs:
 *  - &DRM_IOCTL_XE_DEVICE_QUERY
 *  - &DRM_IOCTL_XE_GEM_CREATE
 *  - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *  - &DRM_IOCTL_XE_VM_CREATE
 *  - &DRM_IOCTL_XE_VM_DESTROY
 *  - &DRM_IOCTL_XE_VM_BIND
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *  - &DRM_IOCTL_XE_EXEC
 *  - &DRM_IOCTL_XE_WAIT_USER_FENCE
 *  - &DRM_IOCTL_XE_OBSERVATION
 */

/*
 * xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_EXEC_QUEUE_CREATE	0x06
#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
#define DRM_XE_EXEC			0x09
#define DRM_XE_WAIT_USER_FENCE		0x0a
#define DRM_XE_OBSERVATION		0x0b

/* Must be kept compact -- no holes */

#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
/**
 * DOC: Xe IOCTL Extensions
 *
 * Before detailing the IOCTLs and their structs, it is important to highlight
 * that every IOCTL in Xe is extensible.
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct drm_xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */

/**
 * struct drm_xe_user_extension - Base class for defining a chain of extensions
 */
struct drm_xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct drm_xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct drm_xe_user_extension.
	 */
	__u32 name;

	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/**
 * struct drm_xe_ext_set_property - Generic set property extension
 *
 * A generic struct that allows any of the Xe IOCTLs to be extended
 * with a set_property operation.
 */
struct drm_xe_ext_set_property {
	/** @base: base user extension */
	struct drm_xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
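
/*
 * Example: a minimal sketch (not a normative part of this uAPI) of feeding a
 * single set_property extension to an IOCTL that embeds an extension pointer.
 * The extension name and property shown here are the exec queue ones defined
 * further below; valid names and properties are always defined by the
 * specific IOCTL being extended:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.next_extension = 0, // end of chain
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		// remaining fields filled in as usual
 *	};
 */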
/**
 * struct drm_xe_engine_class_instance - instance of an engine class
 *
 * It is returned as part of the @drm_xe_engine, but it is also used as
 * the input for engine selection for both @drm_xe_exec_queue_create and
 * @drm_xe_query_engine_cycles
 *
 * The @engine_class can be:
 *  - %DRM_XE_ENGINE_CLASS_RENDER
 *  - %DRM_XE_ENGINE_CLASS_COPY
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
 *  - %DRM_XE_ENGINE_CLASS_COMPUTE
 *  - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only class (not an actual
 *    hardware engine class). Used for creating ordered queues of VM
 *    bind operations.
 */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
#define DRM_XE_ENGINE_CLASS_VM_BIND		5
	/** @engine_class: engine class id */
	__u16 engine_class;
	/** @engine_instance: engine instance id */
	__u16 engine_instance;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad;
};

/**
 * struct drm_xe_engine - describe hardware engine
 */
struct drm_xe_engine {
	/** @instance: The @drm_xe_engine_class_instance */
	struct drm_xe_engine_class_instance instance;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_query_engines - describe engines
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses
 * struct @drm_xe_query_engines in .data.
 */
struct drm_xe_query_engines {
	/** @num_engines: number of engines returned in @engines */
	__u32 num_engines;
	/** @pad: MBZ */
	__u32 pad;
	/** @engines: The returned engines for this device */
	struct drm_xe_engine engines[];
};
/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
	/**
	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
	DRM_XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_mem_region - Describes some region as known to
 * the driver.
 */
struct drm_xe_mem_region {
	/**
	 * @mem_class: The memory class describing this region.
	 *
	 * See enum drm_xe_memory_class for supported values.
	 */
	__u16 mem_class;
	/**
	 * @instance: The unique ID for this region, which serves as the
	 * index in the placement bitmask used as argument for
	 * &DRM_IOCTL_XE_GEM_CREATE
	 */
	__u16 instance;
	/**
	 * @min_page_size: Min page-size in bytes for this region.
	 *
	 * When the kernel allocates memory for this region, the
	 * underlying pages will be at least @min_page_size in size.
	 * Buffer objects with an allowable placement in this region must be
	 * created with a size aligned to this value.
	 * GPU virtual address mappings of (parts of) buffer objects that
	 * may be placed in this region must also have their GPU virtual
	 * address and range aligned to this value.
	 * Affected IOCTLs will return %-EINVAL if alignment restrictions are
	 * not met.
	 */
	__u32 min_page_size;
	/**
	 * @total_size: The usable size in bytes for this region.
	 */
	__u64 total_size;
	/**
	 * @used: Estimate of the memory used in bytes for this region.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero.
	 */
	__u64 used;
	/**
	 * @cpu_visible_size: How much of this region can be CPU
	 * accessed, in bytes.
	 *
	 * This will always be <= @total_size, and the remainder (if
	 * any) will not be CPU accessible. If the CPU accessible part
	 * is smaller than @total_size then this is referred to as a
	 * small BAR system.
	 *
	 * On systems without small BAR (full BAR), the @cpu_visible_size
	 * will always equal the @total_size, since all of it will be CPU
	 * accessible.
	 *
	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
	__u64 cpu_visible_size;
	/**
	 * @cpu_visible_used: Estimate of CPU visible memory used, in
	 * bytes.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero. Note this is only currently tracked for
	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
	/** @reserved: Reserved */
	__u64 reserved[6];
};

/**
 * struct drm_xe_query_mem_regions - describe memory regions
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
 * struct drm_xe_query_mem_regions in .data.
 */
struct drm_xe_query_mem_regions {
	/** @num_mem_regions: number of memory regions returned in @mem_regions */
	__u32 num_mem_regions;
	/** @pad: MBZ */
	__u32 pad;
	/** @mem_regions: The returned memory regions for this device */
	struct drm_xe_mem_region mem_regions[];
};
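
/*
 * Example: a minimal sketch (assuming the regions were already fetched with
 * %DRM_XE_DEVICE_QUERY_MEM_REGIONS, see struct drm_xe_device_query below) of
 * turning region instances into the placement bitmask expected by
 * &DRM_IOCTL_XE_GEM_CREATE:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_mem_regions *regions = ...; // from the query
 *	__u32 vram_placement = 0;
 *
 *	for (int i = 0; i < regions->num_mem_regions; i++)
 *		if (regions->mem_regions[i].mem_class ==
 *		    DRM_XE_MEM_REGION_CLASS_VRAM)
 *			vram_placement |= 1 << regions->mem_regions[i].instance;
 */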
/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 *
 * The index in @info can be:
 *  - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
 *    and the device revision (next 8 bits)
 *  - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
 *    configuration, see list below
 *
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 *      has usable VRAM
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
 *      has low latency hint support
 *
 *  - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 *    required by this device, typically SZ_4K or SZ_64K
 *  - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
 *  - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
 *    available exec queue priority
 */
struct drm_xe_query_config {
	/** @num_params: number of parameters returned in info */
	__u32 num_params;

	/** @pad: MBZ */
	__u32 pad;

#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID		0
#define DRM_XE_QUERY_CONFIG_FLAGS			1
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM	(1 << 0)
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY	(1 << 1)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
#define DRM_XE_QUERY_CONFIG_VA_BITS			3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
	/** @info: array of elements containing the config info */
	__u64 info[];
};

/**
 * struct drm_xe_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which will return a list with all the
 * existing GT individual descriptions.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 *
 * The index in @type can be:
 *  - %DRM_XE_QUERY_GT_TYPE_MAIN
 *  - %DRM_XE_QUERY_GT_TYPE_MEDIA
 */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN	0
#define DRM_XE_QUERY_GT_TYPE_MEDIA	1
	/** @type: GT type: Main or Media */
	__u16 type;
	/** @tile_id: Tile ID where this GT lives (Information only) */
	__u16 tile_id;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad[3];
	/** @reference_clock: A clock frequency for timestamp */
	__u32 reference_clock;
	/**
	 * @near_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are nearest to the current engines
	 * of this GT.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 near_mem_regions;
	/**
	 * @far_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are far from the engines of this GT.
	 * In general, they have extra indirections when compared to the
	 * @near_mem_regions. For a discrete device this could mean system
	 * memory and memory living in a different tile.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 far_mem_regions;
	/** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
	__u16 ip_ver_major;
	/** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
	__u16 ip_ver_minor;
	/** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
	__u16 ip_ver_rev;
	/** @pad2: MBZ */
	__u16 pad2;
	/** @reserved: Reserved */
	__u64 reserved[7];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
	/** @num_gt: number of GT items returned in gt_list */
	__u32 num_gt;
	/** @pad: MBZ */
	__u32 pad;
	/** @gt_list: The GT list returned for this device */
	struct drm_xe_gt gt_list[];
};
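
/*
 * Example: a minimal sketch of reading individual values out of the @info
 * array (assuming the config was already fetched with
 * %DRM_XE_DEVICE_QUERY_CONFIG and the two-call pattern described in struct
 * drm_xe_device_query below):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_config *config = ...; // from the query
 *	__u16 dev_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
 *	__u8 rev = (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff;
 *	int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
 *			  DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
 *	__u64 min_alignment = config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT];
 *	__u64 va_bits = config->info[DRM_XE_QUERY_CONFIG_VA_BITS];
 */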
/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 *
 * The @type can be:
 *  - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
 *    (DSS) available for geometry operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for geometry.
 *  - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
 *    (DSS) available for compute operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for compute.
 *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type
 *    may be omitted if the driver is unable to query the mask from the
 *    hardware.
 *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
 *    available per Dual Sub Slices (DSS). For example a query response
 *    containing the following in mask:
 *    ``EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 SIMD8 EUs. This type may be omitted if the device
 *    doesn't have SIMD8 EUs.
 *  - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
 *    Units (EU) available per Dual Sub Slices (DSS). For example a query
 *    response containing the following in mask:
 *    ``SIMD16_EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 SIMD16 EUs. This type may be omitted if the device
 *    doesn't have SIMD16 EUs.
 */
struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

#define DRM_XE_TOPO_DSS_GEOMETRY	1
#define DRM_XE_TOPO_DSS_COMPUTE		2
#define DRM_XE_TOPO_L3_BANK		3
#define DRM_XE_TOPO_EU_PER_DSS		4
#define DRM_XE_TOPO_SIMD16_EU_PER_DSS	5
	/** @type: type of mask */
	__u16 type;

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};

/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct
 * drm_xe_query_engine_cycles in .data. struct drm_xe_query_engine_cycles is
 * allocated by the user and .data points to this allocated structure.
 *
 * The query returns the engine cycles, which along with the GT's
 * @reference_clock, can be used to calculate the engine timestamp. In
 * addition the query returns a set of CPU timestamps that indicate when the
 * command streamer cycle count was captured.
 */
struct drm_xe_query_engine_cycles {
	/**
	 * @eci: This is input by the user and is the engine for which command
	 * streamer cycles is queried.
	 */
	struct drm_xe_engine_class_instance eci;

	/**
	 * @clockid: This is input by the user and is the reference clock id for
	 * CPU timestamp. For definition, see clock_gettime(2) and
	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
	 */
	__s32 clockid;

	/** @width: Width of the engine cycle counter in bits. */
	__u32 width;

	/**
	 * @engine_cycles: Engine cycles as read from its register
	 * at 0x358 offset.
	 */
	__u64 engine_cycles;

	/**
	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
	 * reading the engine_cycles register using the reference clockid set by the
	 * user.
	 */
	__u64 cpu_timestamp;

	/**
	 * @cpu_delta: Time delta in ns captured around reading the lower dword
	 * of the engine_cycles register.
	 */
	__u64 cpu_delta;
};
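
/*
 * Example: a minimal sketch counting the enabled bits of one returned
 * topology mask (assuming 'topo' points at one struct
 * drm_xe_query_topology_mask entry of a %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 * reply, with @num_bytes of mask following the header):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_topology_mask *topo = ...; // one reply entry
 *	int enabled = 0;
 *
 *	for (int i = 0; i < topo->num_bytes; i++)
 *		enabled += __builtin_popcount(topo->mask[i]);
 *	// e.g. for DRM_XE_TOPO_DSS_GEOMETRY, 'enabled' is the number of
 *	// DSS available for geometry on GT 'topo->gt_id'
 */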
/**
 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
 *
 * Given a uc_type this will return the branch, major, minor and patch version
 * of the micro-controller firmware.
 */
struct drm_xe_query_uc_fw_version {
	/** @uc_type: The micro-controller type to query firmware version */
#define XE_QUERY_UC_TYPE_GUC_SUBMISSION	0
#define XE_QUERY_UC_TYPE_HUC		1
	__u16 uc_type;

	/** @pad: MBZ */
	__u16 pad;

	/** @branch_ver: branch uc fw version */
	__u32 branch_ver;
	/** @major_ver: major uc fw version */
	__u32 major_ver;
	/** @minor_ver: minor uc fw version */
	__u32 minor_ver;
	/** @patch_ver: patch uc fw version */
	__u32 patch_ver;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved;
};
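
/*
 * Example: a minimal sketch of querying the GuC submission firmware version.
 * Note that @uc_type is an input inside the queried struct, so it must be set
 * before the &DRM_IOCTL_XE_DEVICE_QUERY call that copies the data (this
 * sketch assumes the fixed-size reply can be passed directly with .size set
 * to sizeof(ver), see struct drm_xe_device_query below):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_uc_fw_version ver = {
 *		.uc_type = XE_QUERY_UC_TYPE_GUC_SUBMISSION,
 *	};
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_UC_FW_VERSION,
 *		.size = sizeof(ver),
 *		.data = (uintptr_t)&ver,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	printf("GuC fw %u.%u.%u\n", ver.major_ver, ver.minor_ver, ver.patch_ver);
 */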
/**
 * struct drm_xe_query_pxp_status - query if PXP is ready
 *
 * If PXP is enabled and no fatal error has occurred, the status will be set to
 * one of the following values:
 *  0: PXP init still in progress
 *  1: PXP init complete
 *
 * If PXP is not enabled or something has gone wrong, the query will fail
 * with one of the following error codes:
 *  -ENODEV: PXP not supported or disabled;
 *  -EIO: fatal error occurred during init, so PXP will never be enabled;
 *  -EINVAL: incorrect value provided as part of the query;
 *  -EFAULT: error copying the memory between kernel and userspace.
 *
 * The status can only be 0 in the first few seconds after driver load. If
 * everything works as expected, the status will transition to init complete in
 * less than 1 second, while in case of errors the driver might take longer to
 * start returning an error code, but it should still take less than 10 seconds.
 *
 * The supported session type bitmask is based on the values in
 * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
 * is not reported in the bitmask.
 */
struct drm_xe_query_pxp_status {
	/** @status: current PXP status */
	__u32 status;

	/** @supported_session_types: bitmask of supported PXP session types */
	__u32 supported_session_types;
};

/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
 * and sets the value in the query member. This determines the type of
 * the structure provided by the driver in data, among struct drm_xe_query_*.
 *
 * The @query can be:
 *  - %DRM_XE_DEVICE_QUERY_ENGINES
 *  - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
 *  - %DRM_XE_DEVICE_QUERY_CONFIG
 *  - %DRM_XE_DEVICE_QUERY_GT_LIST
 *  - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
 *    configuration of the device such as information on slices, memory,
 *    caches, and so on. It is provided as a table of key / value
 *    attributes.
 *  - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 *  - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 *  - %DRM_XE_DEVICE_QUERY_UC_FW_VERSION
 *  - %DRM_XE_DEVICE_QUERY_OA_UNITS
 *  - %DRM_XE_DEVICE_QUERY_PXP_STATUS
 *  - %DRM_XE_DEVICE_QUERY_EU_STALL
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
 * size, the queried information is copied into data. If size is set to
 * a value different from 0 and different from the required size, the
 * IOCTL call returns -EINVAL.
 *
 * For example, the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_engines *engines;
 *	struct drm_xe_device_query query = {
 *		.extensions = 0,
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *		.size = 0,
 *		.data = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	engines = malloc(query.size);
 *	query.data = (uintptr_t)engines;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	for (int i = 0; i < engines->num_engines; i++) {
 *		printf("Engine %d: %s\n", i,
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_RENDER ? "RENDER" :
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_COPY ? "COPY" :
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE" :
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE" :
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE" :
 *			"UNKNOWN");
 *	}
 *	free(engines);
 */
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES		0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS		1
#define DRM_XE_DEVICE_QUERY_CONFIG		2
#define DRM_XE_DEVICE_QUERY_GT_LIST		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG		4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION	7
#define DRM_XE_DEVICE_QUERY_OA_UNITS		8
#define DRM_XE_DEVICE_QUERY_PXP_STATUS		9
#define DRM_XE_DEVICE_QUERY_EU_STALL		10
	/** @query: The type of data to query */
	__u32 query;

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
/**
 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
 * gem creation
 *
 * The @flags can be:
 *  - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
 *  - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
 *  - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
 *    possible placement, ensure that the corresponding VRAM allocation
 *    will always use the CPU accessible part of VRAM. This is important
 *    for small-bar systems (on full-bar systems this gets turned into a
 *    noop).
 *    Note1: System memory can be used as an extra placement if the kernel
 *    should spill the allocation to system memory, if space can't be made
 *    available in the CPU accessible part of VRAM (giving the same
 *    behaviour as the i915 interface, see
 *    I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *    Note2: For clear-color CCS surfaces the kernel needs to read the
 *    clear-color value stored in the buffer, and on discrete platforms we
 *    need to use VRAM for display surfaces, therefore the kernel requires
 *    setting this flag for such objects, otherwise an error is thrown on
 *    small-bar systems.
 *
 * @cpu_caching supports the following values:
 *  - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
 *    caching. On iGPU this can't be used for scanout surfaces. Currently
 *    not allowed for objects placed in VRAM.
 *  - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 *    is uncached. Scanout surfaces should likely use this. All objects
 *    that can be placed in VRAM must use this.
 *
 * This ioctl supports setting the following properties via the
 * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
 * generic @drm_xe_ext_set_property struct:
 *
 *  - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
 *    this object will be used with. Valid values are listed in enum
 *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
 *    there is no need to explicitly set that. Objects used with session of type
 *    %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
 *    event occurs after their creation. Attempting to flip an invalid object
 *    will cause a black frame to be displayed instead. Submissions with invalid
 *    objects mapped in the VM will be rejected.
 */
struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY	0
#define   DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE	0
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Size of the object to be created, must match region
	 * (system or vram) minimum alignment (&min_page_size).
	 */
	__u64 size;

	/**
	 * @placement: A mask of memory instances of where BO can be placed.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u32 placement;

#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(1 << 2)
	/**
	 * @flags: Flags for object creation, see the
	 * DRM_XE_GEM_CREATE_FLAG_* values above
	 */
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *  2. Cannot be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

#define DRM_XE_GEM_CPU_CACHING_WB	1
#define DRM_XE_GEM_CPU_CACHING_WC	2
	/**
	 * @cpu_caching: The CPU caching mode to select for this object. If
	 * mmapping the object, the mode selected here will also be used. The
	 * exception is when mapping system memory (including data evicted
	 * to system) on discrete GPUs. The caching mode selected will
	 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
	 * between GPU- and CPU is guaranteed. The caching mode of
	 * existing CPU-mappings will be updated transparently to
	 * user-space clients.
	 */
	__u16 cpu_caching;
	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};
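
/*
 * Example: a minimal sketch creating a BO that must live in the CPU-visible
 * part of VRAM, with system memory as a spill placement. 'vram_instance' and
 * 'sysmem_instance' are assumed to come from a
 * %DRM_XE_DEVICE_QUERY_MEM_REGIONS query, and bo_size is assumed to already
 * be aligned to the regions' @min_page_size:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_create create = {
 *		.size = bo_size,
 *		.placement = (1 << vram_instance) | (1 << sysmem_instance),
 *		.flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
 *		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *	// create.handle now holds the (nonzero) object handle
 */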
/**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *
 * The @flags can be:
 *  - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For the user to query a special
 *    offset for use in the mmap ioctl. Writing to the returned mmap address
 *    will generate a PCI memory barrier with low overhead (avoiding an IOCTL
 *    call as well as writing to VRAM, which would also add overhead), acting
 *    like an MI_MEM_FENCE instruction.
 *
 * Note: The mmap size can be at most 4K, due to HW limitations. As a result
 * this interface is only supported on CPU architectures that support 4K page
 * size. The mmap_offset ioctl will detect this and gracefully return an
 * error, where userspace is expected to have a different fallback method for
 * triggering a barrier.
 *
 * Roughly the usage would be as follows:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_mmap_offset mmo = {
 *		.handle = 0, // must be set to 0
 *		.flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
 *	};
 *
 *	err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *	map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
 *	map[i] = 0xdeadbeef; // issue barrier
 */
struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER	(1 << 0)
	/** @flags: Flags */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
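
/*
 * Example: a minimal sketch of the regular (non-barrier) flow to CPU-map a
 * BO: fetch the fake offset for its handle, then pass that offset to mmap(2)
 * ('create' is assumed to come from a prior &DRM_IOCTL_XE_GEM_CREATE):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_mmap_offset mmo = {
 *		.handle = create.handle,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *	void *map = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mmo.offset);
 */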
/**
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
 *  - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
 *  - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
 *    exec submissions to its exec_queues that don't have an upper time
 *    limit on the job execution time. But exec submissions to these
 *    don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ or
 *    DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ used as out-syncobjs, that is,
 *    together with DRM_XE_SYNC_FLAG_SIGNAL.
 *    LR VMs can be created in recoverable page-fault mode using
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
 *    If that flag is omitted, the UMD cannot rely on the slightly
 *    different per-VM overcommit semantics that are enabled by
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but the KMD may
 *    still enable recoverable pagefaults if supported by the device.
 *  - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
 *    DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
 *    demand when accessed, and also allows per-VM overcommit of memory.
 *    The xe driver internally uses recoverable pagefaults to implement
 *    this.
 */
struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE	(1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE		(1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
	/** @flags: Flags */
	__u32 flags;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
 */
struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
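
/*
 * Example: a minimal sketch creating a long-running VM in recoverable
 * page-fault mode (only valid if the device supports it, see the flag
 * descriptions above):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
 *			 DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *	// create.vm_id now holds the new VM ID
 */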
/**
 * struct drm_xe_vm_bind_op - run bind operations
 *
 * The @op can be:
 *  - %DRM_XE_VM_BIND_OP_MAP
 *  - %DRM_XE_VM_BIND_OP_UNMAP
 *  - %DRM_XE_VM_BIND_OP_MAP_USERPTR
 *  - %DRM_XE_VM_BIND_OP_UNMAP_ALL
 *  - %DRM_XE_VM_BIND_OP_PREFETCH
 *
 * and the @flags can be:
 *  - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
 *    to ensure write protection
 *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
 *    MAP operation immediately rather than deferring the MAP to the page
 *    fault handler. This is implied on a non-faulting VM as there is no
 *    fault handler to defer to.
 *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
 *    tables are setup with a special bit which indicates writes are
 *    dropped and all reads return zero. In the future, the NULL flag
 *    will only be valid for DRM_XE_VM_BIND_OP_MAP operations, with the BO
 *    handle MBZ and the BO offset MBZ. This flag is intended to
 *    implement VK sparse bindings.
 *  - %DRM_XE_VM_BIND_FLAG_DUMPABLE - When set, include this mapping in the
 *    device coredump captured on a GPU hang.
 *  - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
 *    reject the binding if the encryption key is no longer valid. This
 *    flag has no effect on BOs that are not marked as using PXP.
 */
struct drm_xe_vm_bind_op {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/**
	 * @pat_index: The platform defined @pat_index to use for this mapping.
	 * The index basically maps to some predefined memory attributes,
	 * including things like caching, coherency, compression etc. The exact
	 * meaning of the pat_index is platform specific and defined in the
	 * Bspec and PRMs. When the KMD sets up the binding the index here is
	 * encoded into the ppGTT PTE.
	 *
	 * For coherency the @pat_index needs to be at least 1way coherent when
	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
	 * will extract the coherency mode from the @pat_index and reject if
	 * there is a mismatch (see note below for pre-MTL platforms).
	 *
	 * Note: On pre-MTL platforms there is only a caching mode and no
	 * explicit coherency mode, but on such hardware there is always a
	 * shared-LLC (or the platform is dGPU) so all GT memory accesses are
	 * coherent with CPU caches even with the caching mode set as uncached.
	 * It's only the display engine that is incoherent (on dGPU it must be
	 * in VRAM, which is always mapped as WC on the CPU). However to keep
	 * the uapi somewhat consistent with newer platforms the KMD groups the
	 * different cache levels into the following coherency buckets on all
	 * pre-MTL platforms:
	 *
	 *	ppGTT UC -> COH_NONE
	 *	ppGTT WC -> COH_NONE
	 *	ppGTT WT -> COH_NONE
	 *	ppGTT WB -> COH_AT_LEAST_1WAY
	 *
	 * In practice UC/WC/WT should only ever be used for scanout surfaces
	 * on such platforms (or perhaps in general for dma-buf if shared with
	 * another device) since it is only the display engine that is actually
	 * incoherent. Everything else should typically use WB given that we
	 * have a shared-LLC. On MTL+ this completely changes and the HW
	 * defines the coherency mode as part of the @pat_index, where
	 * incoherent GT access is possible.
	 *
	 * Note: For userptr and externally imported dma-buf the kernel expects
	 * either 1WAY or 2WAY for the @pat_index.
	 *
	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
	 * on the @pat_index. For such mappings there is no actual memory being
	 * mapped (the address in the PTE is invalid), so the various PAT memory
	 * attributes likely do not apply. Simply leaving as zero is one
	 * option (still a valid pat_index).
	 */
	__u16 pat_index;

	/** @pad: MBZ */
	__u16 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, ignored for unbind
		 */
		__u64 obj_offset;

		/** @userptr: user pointer to bind on */
		__u64 userptr;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

#define DRM_XE_VM_BIND_OP_MAP		0x0
#define DRM_XE_VM_BIND_OP_UNMAP		0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
	/** @op: Bind operation to perform */
	__u32 op;

#define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP	(1 << 4)
	/** @flags: Bind flags */
	__u32 flags;

	/**
	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
	 * It is a region instance, not a mask.
	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
	 */
	__u32 prefetch_mem_region_instance;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[3];
};
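
/*
 * Example: a minimal sketch of one %DRM_XE_VM_BIND_OP_MAP operation binding
 * a BO at a given GPU address, complementing the userptr example in struct
 * drm_xe_vm_bind below ('bo_handle', 'pat_index', BO_SIZE and BIND_ADDRESS
 * are assumed to be valid for the device and aligned as required):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_bind_op op = {
 *		.obj = bo_handle,
 *		.obj_offset = 0,
 *		.range = BO_SIZE,
 *		.addr = BIND_ADDRESS,
 *		.op = DRM_XE_VM_BIND_OP_MAP,
 *		.pat_index = pat_index,
 *	};
 */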
/**
 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
 *
 * Below is an example of a minimal use of @drm_xe_vm_bind to
 * asynchronously bind the buffer `data` at address `BIND_ADDRESS`,
 * illustrating a `userptr` bind. It can be synchronized by using the
 * example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *	data = aligned_alloc(ALIGNMENT, BO_SIZE);
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm,
 *		.num_binds = 1,
 *		.bind.obj = 0,
 *		.bind.obj_offset = to_user_pointer(data),
 *		.bind.range = BO_SIZE,
 *		.bind.addr = BIND_ADDRESS,
 *		.bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
 *		.bind.flags = 0,
 *		.num_syncs = 1,
 *		.syncs = &sync,
 *		.exec_queue_id = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and the exec queue must have the same vm_id. If zero, the default VM bind
	 * engine is used.
	 */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;

		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @pad2: MBZ */
	__u32 pad2;

	/** @num_syncs: amount of syncs to wait on */
	__u32 num_syncs;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
/**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
 * This ioctl supports setting the following properties via the
 * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
 * generic @drm_xe_ext_set_property struct:
 *
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
 *    CAP_SYS_NICE is required to set a value above normal.
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
 *    duration in microseconds.
 *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
 *    this queue will be used with. Valid values are listed in enum
 *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
 *    there is no need to explicitly set that. When a queue of type
 *    %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
 *    (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if it isn't already
 *    running. Given that going into a power-saving state kills PXP HWDRM
 *    sessions, runtime PM will be blocked while queues of this type are
 *    alive. All PXP queues will be killed if a PXP invalidation event occurs.
 *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create exec_queue_create = {
 *		.extensions = 0,
 *		.vm_id = vm,
 *		.width = 1,
 *		.num_placements = 1,
 *		.instances = to_user_pointer(&instance),
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 *
 * A user can also provide a hint to the kernel for cases demanding a
 * low-latency profile. Please note this will have an impact on power
 * consumption. The low-latency hint is indicated with a flag while creating
 * the exec queue, as shown below:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec_queue_create exec_queue_create = {
 *		.flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
 *		.extensions = 0,
 *		.vm_id = vm,
 *		.width = 1,
 *		.num_placements = 1,
 *		.instances = to_user_pointer(&instance),
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY	0
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE	1
#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE	2
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @width: submission width (number BB per exec) for this exec queue */
	__u16 width;

	/** @num_placements: number of valid placements for this exec queue */
	__u16 num_placements;

	/** @vm_id: VM to use for this exec queue */
	__u32 vm_id;

#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT	(1 << 0)
	/** @flags: flags to use for this exec queue */
	__u32 flags;

	/** @exec_queue_id: Returned exec queue ID */
	__u32 exec_queue_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 */
struct drm_xe_exec_queue_destroy {
	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *
 * The @property can be:
 *  - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
 */
struct drm_xe_exec_queue_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
	/** @property: property to get */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
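
/*
 * Example: a minimal sketch querying the ban state of an exec queue
 * ('exec_queue' is assumed to come from &DRM_IOCTL_XE_EXEC_QUEUE_CREATE;
 * how @value encodes the property is a property-specific detail):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec_queue_get_property prop = {
 *		.exec_queue_id = exec_queue,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
 *	// prop.value now reflects whether the queue was banned
 */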
/**
 * struct drm_xe_sync - sync object
 *
 * The @type can be:
 *  - %DRM_XE_SYNC_TYPE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_USER_FENCE
 *
 * and the @flags can be:
 *  - %DRM_XE_SYNC_FLAG_SIGNAL
 *
 * A minimal use of @drm_xe_sync looks like this:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *	};
 *	struct drm_syncobj_create syncobj_create = { 0 };
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
 *	sync.handle = syncobj_create.handle;
 *
 *	... use of &sync in drm_xe_exec or drm_xe_vm_bind ...
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = &sync.handle,
 *		.timeout_nsec = INT64_MAX,
 *		.count_handles = 1,
 *		.flags = 0,
 *		.first_signaled = 0,
 *		.pad = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */
struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_SYNC_TYPE_SYNCOBJ		0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE		0x2
	/** @type: Type of this sync object */
	__u32 type;

#define DRM_XE_SYNC_FLAG_SIGNAL	(1 << 0)
	/** @flags: Sync Flags */
	__u32 flags;

	union {
		/** @handle: Handle for the object */
		__u32 handle;

		/**
		 * @addr: Address of user fence. When the sync is passed in via
		 * the exec IOCTL this is a GPU address in the VM. When the
		 * sync is passed in via the VM bind IOCTL this is a user
		 * pointer. In either case, it is the user's responsibility
		 * that this address is present and mapped when the user fence
		 * is signalled. Must be qword aligned.
		 */
		__u64 addr;
	};

	/**
	 * @timeline_value: Input for the timeline sync object. Needs to be
	 * different from 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
 *
 * This is an example of using @drm_xe_exec to execute the object at
 * BIND_ADDRESS (see the example in @drm_xe_vm_bind) on an exec_queue
 * (see the example in @drm_xe_exec_queue_create). It can be synchronized
 * by using the example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = exec_queue,
 *		.syncs = &sync,
 *		.num_syncs = 1,
 *		.address = BIND_ADDRESS,
 *		.num_batch_buffer = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */
struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID for the batch buffer */
	__u32 exec_queue_id;

	/** @num_syncs: Number of struct drm_xe_sync entries in @syncs. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};
/**
 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
 *
 * Wait on a user fence. XE will wake up on every HW engine interrupt in the
 * instances list and check if the user fence is complete::
 *
 *	(*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 *
 * The @op can be:
 *  - %DRM_XE_UFENCE_WAIT_OP_EQ
 *  - %DRM_XE_UFENCE_WAIT_OP_NEQ
 *  - %DRM_XE_UFENCE_WAIT_OP_GT
 *  - %DRM_XE_UFENCE_WAIT_OP_GTE
 *  - %DRM_XE_UFENCE_WAIT_OP_LT
 *  - %DRM_XE_UFENCE_WAIT_OP_LTE
 *
 * and the @flags can be:
 *  - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 *  - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
 *
 * The @mask values can be for example:
 *  - 0xffu for u8
 *  - 0xffffu for u16
 *  - 0xffffffffu for u32
 *  - 0xffffffffffffffffu for u64
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @addr: user pointer address to wait on, must be qword aligned
	 */
	__u64 addr;

#define DRM_XE_UFENCE_WAIT_OP_EQ	0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ	0x1
#define DRM_XE_UFENCE_WAIT_OP_GT	0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE	0x3
#define DRM_XE_UFENCE_WAIT_OP_LT	0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE	0x5
	/** @op: wait operation (type of comparison) */
	__u16 op;

#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
	/** @flags: wait flags */
	__u16 flags;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: compare value */
	__u64 value;

	/** @mask: comparison mask */
	__u64 mask;

	/**
	 * @timeout: how long to wait before bailing, value in nanoseconds.
	 * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative
	 * timeout) it contains the timeout expressed in nanoseconds to wait
	 * (the fence will expire at now() + timeout).
	 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute
	 * timeout) the wait will end at timeout (uses the system
	 * MONOTONIC_CLOCK).
	 * Passing a negative timeout leads to a never-ending wait.
	 *
	 * On relative timeout this value is updated with the timeout left
	 * (for restarting the call in case of signal delivery).
	 * On absolute timeout this value stays intact (a restarted call will
	 * still expire at the same point in time).
	 */
	__s64 timeout;

	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
	__u32 exec_queue_id;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
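
/*
 * Example: a minimal sketch waiting up to one second for the 64-bit user
 * fence at 'addr' to become equal to FENCE_DONE. 'addr' is assumed to be the
 * same qword-aligned address previously attached to a submission via struct
 * drm_xe_sync with %DRM_XE_SYNC_TYPE_USER_FENCE:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_wait_user_fence wait = {
 *		.addr = (uintptr_t)addr,
 *		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
 *		.value = FENCE_DONE,
 *		.mask = 0xffffffffffffffffu,
 *		.timeout = 1000000000, // 1s, relative timeout
 *		.exec_queue_id = exec_queue,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */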
/**
 * enum drm_xe_observation_type - Observation stream types
 */
enum drm_xe_observation_type {
	/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
	DRM_XE_OBSERVATION_TYPE_OA,
	/** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
	DRM_XE_OBSERVATION_TYPE_EU_STALL,
};

/**
 * enum drm_xe_observation_op - Observation stream ops
 */
enum drm_xe_observation_op {
	/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
	DRM_XE_OBSERVATION_OP_STREAM_OPEN,

	/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
	DRM_XE_OBSERVATION_OP_ADD_CONFIG,

	/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
	DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
};

/**
 * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
 *
 * The observation layer enables multiplexing observation streams of
 * multiple types. The actual params for a particular stream operation are
 * supplied via the @param pointer (use __copy_from_user to get these
 * params).
 */
struct drm_xe_observation_param {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
	__u64 observation_type;
	/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
	__u64 observation_op;
	/** @param: Pointer to actual stream params */
	__u64 param;
};

/**
 * enum drm_xe_observation_ioctls - Observation stream fd ioctls
 *
 * Information exchanged between userspace and kernel for observation fd
 * ioctls is stream type specific
 */
enum drm_xe_observation_ioctls {
	/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),

	/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for an observation stream */
	DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),

	/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
	DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),

	/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
	DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),

	/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
	DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
};
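
/*
 * Example: a minimal sketch of opening an OA stream via
 * &DRM_IOCTL_XE_OBSERVATION. Stream properties are passed as a chain of
 * set_property extensions (see enum drm_xe_oa_property_id below);
 * 'oa_config_id' is assumed to come from a previous
 * %DRM_XE_OBSERVATION_OP_ADD_CONFIG, and the open op is assumed to return a
 * stream fd on success:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property props[] = {
 *		{ .base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
 *		  .base.next_extension = (uintptr_t)&props[1],
 *		  .property = DRM_XE_OA_PROPERTY_OA_METRIC_SET,
 *		  .value = oa_config_id },
 *		{ .base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
 *		  .base.next_extension = 0, // end of chain
 *		  .property = DRM_XE_OA_PROPERTY_SAMPLE_OA,
 *		  .value = 1 },
 *	};
 *	struct drm_xe_observation_param param = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *		.param = (uintptr_t)&props[0],
 *	};
 *	int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);
 */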
/**
 * enum drm_xe_oa_unit_type - OA unit types
 */
enum drm_xe_oa_unit_type {
	/**
	 * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered
	 * sub-types of OAG. For OAR/OAC, use OAG.
	 */
	DRM_XE_OA_UNIT_TYPE_OAG,

	/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
	DRM_XE_OA_UNIT_TYPE_OAM,
};

/**
 * struct drm_xe_oa_unit - describe OA unit
 */
struct drm_xe_oa_unit {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_unit_id: OA unit ID */
	__u32 oa_unit_id;

	/** @oa_unit_type: OA unit type, of enum @drm_xe_oa_unit_type */
	__u32 oa_unit_type;

	/** @capabilities: OA capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_OA_CAPS_BASE		(1 << 0)
#define DRM_XE_OA_CAPS_SYNCS		(1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE	(1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS	(1 << 3)

	/** @oa_timestamp_freq: OA timestamp freq */
	__u64 oa_timestamp_freq;

	/** @reserved: MBZ */
	__u64 reserved[4];

	/** @num_engines: number of engines in @eci array */
	__u64 num_engines;

	/** @eci: engines attached to this OA unit */
	struct drm_xe_engine_class_instance eci[];
};

/**
 * struct drm_xe_query_oa_units - describe OA units
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct
 * drm_xe_query_oa_units in .data.
 *
 * OA unit properties for all OA units can be accessed using a code block
 * such as the one below:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_oa_units *qoa;
 *	struct drm_xe_oa_unit *oau;
 *	u8 *poau;
 *
 *	// malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then:
 *	poau = (u8 *)&qoa->oa_units[0];
 *	for (int i = 0; i < qoa->num_oa_units; i++) {
 *		oau = (struct drm_xe_oa_unit *)poau;
 *		// Access 'struct drm_xe_oa_unit' fields here
 *		poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
 *	}
 */
struct drm_xe_query_oa_units {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	/** @num_oa_units: number of OA units returned in @oa_units */
	__u32 num_oa_units;
	/** @pad: MBZ */
	__u32 pad;
	/**
	 * @oa_units: struct @drm_xe_oa_unit array returned for this device.
	 * Written below as a u64 array to avoid problems with nested flexible
	 * arrays with some compilers
	 */
	__u64 oa_units[];
};

/**
 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
 * 52198/60942
 */
enum drm_xe_oa_format_type {
	/** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */
	DRM_XE_OA_FMT_TYPE_OAG,
	/** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */
	DRM_XE_OA_FMT_TYPE_OAR,
	/** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */
	DRM_XE_OA_FMT_TYPE_OAM,
	/** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */
	DRM_XE_OA_FMT_TYPE_OAC,
	/** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */
	DRM_XE_OA_FMT_TYPE_OAM_MPEC,
	/** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */
	DRM_XE_OA_FMT_TYPE_PEC,
};

/**
 * enum drm_xe_oa_property_id - OA stream property ids
 *
 * Stream params are specified as a chain of @drm_xe_ext_set_property
 * structs, with @property values from enum @drm_xe_oa_property_id and
 * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY.
 * The @param field in struct @drm_xe_observation_param points to the first
 * @drm_xe_ext_set_property struct.
 *
 * Exactly the same mechanism is also used for stream reconfiguration using the
 * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a
 * subset of properties below can be specified for stream reconfiguration.
 */
enum drm_xe_oa_property_id {
#define DRM_XE_OA_EXTENSION_SET_PROPERTY	0
	/**
	 * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open
	 * the OA stream, see @oa_unit_id in 'struct
	 * drm_xe_query_oa_units'. Defaults to 0 if not provided.
	 */
	DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1,

	/**
	 * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw
	 * OA unit reports or stream samples in a global buffer attached to an
	 * OA unit.
	 */
	DRM_XE_OA_PROPERTY_SAMPLE_OA,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA
	 * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG.
	 */
	DRM_XE_OA_PROPERTY_OA_METRIC_SET,

	/** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */
	DRM_XE_OA_PROPERTY_OA_FORMAT,
	/*
	 * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942,
	 * in terms of the following quantities: a. enum @drm_xe_oa_format_type
	 * b. Counter select c. Counter size and d. BC report. Also refer to the
	 * oa_formats array in drivers/gpu/drm/xe/xe_oa.c.
	 */
#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE		(0xffu << 0)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL	(0xffu << 8)
#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE	(0xffu << 16)
#define DRM_XE_OA_FORMAT_MASK_BC_REPORT		(0xffu << 24)

	/**
	 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
	 * sampling with sampling frequency proportional to 2^(period_exponent + 1)
	 */
	DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA
	 * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE).
	 */
	DRM_XE_OA_PROPERTY_OA_DISABLED,

	/**
	 * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific
	 * @exec_queue_id. OA queries can be executed on this exec queue.
	 */
	DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to
	 * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID; defaults to 0.
	 */
	DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE,

	/**
	 * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing
	 * to be disabled for the stream exec queue.
	 */
	DRM_XE_OA_PROPERTY_NO_PREEMPT,

	/**
	 * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array
	 * specified in @DRM_XE_OA_PROPERTY_SYNCS
	 */
	DRM_XE_OA_PROPERTY_NUM_SYNCS,

	/**
	 * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array
	 * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA
	 * configuration will wait till input fences signal. Output fences
	 * will signal after the new OA configuration takes effect. For
	 * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar
	 * to the VM bind case.
	 */
	DRM_XE_OA_PROPERTY_SYNCS,

	/**
	 * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be
	 * allocated by the driver in bytes. Supported sizes are powers of
	 * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA
	 * buffer is allocated by default.
	 */
	DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE,

	/**
	 * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait
	 * for before unblocking poll or read
	 */
	DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
};
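/**
 * DOC: OA stream open property chain example
 *
 * An illustrative sketch of building a property chain and opening an OA
 * stream with it. The config_id (from a previous
 * @DRM_XE_OBSERVATION_OP_ADD_CONFIG), the period exponent and the device
 * fd are assumptions for the example; a real open typically also
 * specifies @DRM_XE_OA_PROPERTY_OA_FORMAT:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property props[3] = {};
 *
 *	props[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	props[0].base.next_extension = (uintptr_t)&props[1];
 *	props[0].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
 *	props[0].value = config_id;
 *
 *	props[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	props[1].base.next_extension = (uintptr_t)&props[2];
 *	props[1].property = DRM_XE_OA_PROPERTY_SAMPLE_OA;
 *	props[1].value = 1;   // include raw OA reports in stream samples
 *
 *	props[2].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	props[2].property = DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT;
 *	props[2].value = 16;  // hypothetical sampling period exponent
 *
 *	struct drm_xe_observation_param arg = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *		.param = (uintptr_t)&props[0],
 *	};
 *	int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &arg);
 */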
/**
 * struct drm_xe_oa_config - OA metric configuration
 *
 * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A
 * particular config can be specified when opening an OA stream using the
 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property.
 */
struct drm_xe_oa_config {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
	char uuid[36];

	/** @n_regs: Number of regs in @regs_ptr */
	__u32 n_regs;

	/**
	 * @regs_ptr: Pointer to (register address, value) pairs for OA config
	 * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs).
	 */
	__u64 regs_ptr;
};
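/**
 * DOC: OA config add example
 *
 * A sketch of registering an OA metric configuration with
 * &DRM_XE_OBSERVATION_OP_ADD_CONFIG; uuid_str and the register
 * (address, value) pair below are hypothetical placeholders. On success
 * the ioctl returns the config id that can later be passed as
 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET:
 *
 * .. code-block:: C
 *
 *	__u32 regs[][2] = { { reg_addr, reg_value } };  // hypothetical pairs
 *
 *	struct drm_xe_oa_config config = {
 *		.n_regs = 1,
 *		.regs_ptr = (uintptr_t)regs,
 *	};
 *	memcpy(config.uuid, uuid_str, sizeof(config.uuid));
 *
 *	struct drm_xe_observation_param arg = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *		.observation_op = DRM_XE_OBSERVATION_OP_ADD_CONFIG,
 *		.param = (uintptr_t)&config,
 *	};
 *	int config_id = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &arg);
 */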
/**
 * struct drm_xe_oa_stream_status - OA stream status returned from
 * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can
 * call the ioctl to query stream status in response to an EIO errno from
 * observation fd read().
 */
struct drm_xe_oa_stream_status {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_status: OA stream status (see Bspec 46717/61226) */
	__u64 oa_status;
#define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL		(1 << 3)
#define DRM_XE_OASTATUS_COUNTER_OVERFLOW	(1 << 2)
#define DRM_XE_OASTATUS_BUFFER_OVERFLOW		(1 << 1)
#define DRM_XE_OASTATUS_REPORT_LOST		(1 << 0)

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * struct drm_xe_oa_stream_info - OA stream info returned from
 * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl
 */
struct drm_xe_oa_stream_info {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @oa_buf_size: OA buffer size */
	__u64 oa_buf_size;

	/** @reserved: reserved for future use */
	__u64 reserved[3];
};

/**
 * enum drm_xe_pxp_session_type - Supported PXP session types.
 *
 * We currently only support HWDRM sessions, which are used for protected
 * content that ends up being displayed, but the HW supports multiple types,
 * so we might extend support in the future.
 */
enum drm_xe_pxp_session_type {
	/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
	DRM_XE_PXP_TYPE_NONE = 0,
	/**
	 * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
	 * up on the display.
	 */
	DRM_XE_PXP_TYPE_HWDRM = 1,
};

/* ID of the protected content session managed by Xe when PXP is active */
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf

/**
 * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
 *
 * These properties are passed to the driver at open as a chain of
 * @drm_xe_ext_set_property structures with @property set to these
 * properties' enums and @value set to the corresponding values of these
 * properties. @drm_xe_user_extension base.name should be set to
 * @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
 *
 * With the file descriptor obtained from open, user space must enable
 * the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
 * calling read(). An EIO errno from read() indicates that HW dropped data
 * due to a full buffer.
 */
enum drm_xe_eu_stall_property_id {
#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY	0
	/**
	 * @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
	 * EU stall data will be captured.
	 */
	DRM_XE_EU_STALL_PROP_GT_ID = 1,

	/**
	 * @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
	 * GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
	 */
	DRM_XE_EU_STALL_PROP_SAMPLE_RATE,

	/**
	 * @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
	 * EU stall data reports to be present in the kernel buffer
	 * before unblocking a blocked poll or read.
	 */
	DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
};

/**
 * struct drm_xe_query_eu_stall - Information about EU stall sampling.
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
 * struct @drm_xe_query_eu_stall in .data.
 */
struct drm_xe_query_eu_stall {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @capabilities: EU stall capabilities bit-mask */
	__u64 capabilities;
#define DRM_XE_EU_STALL_CAPS_BASE		(1 << 0)

	/** @record_size: size of each EU stall data record */
	__u64 record_size;

	/** @per_xecore_buf_size: internal per XeCore buffer size */
	__u64 per_xecore_buf_size;

	/** @reserved: Reserved */
	__u64 reserved[5];

	/** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
	__u64 num_sampling_rates;

	/**
	 * @sampling_rates: Flexible array of sampling rates
	 * sorted in the fastest to slowest order.
	 * Sampling rates are specified in GPU clock cycles.
	 */
	__u64 sampling_rates[];
};
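/**
 * DOC: EU stall stream open example
 *
 * An illustrative sketch of opening an EU stall sampling stream on GT 0.
 * The device fd and sampling_rate (one of the @sampling_rates entries
 * obtained from a prior @DRM_XE_DEVICE_QUERY_EU_STALL query) are
 * assumptions for the example:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property props[2] = {};
 *
 *	props[0].base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
 *	props[0].base.next_extension = (uintptr_t)&props[1];
 *	props[0].property = DRM_XE_EU_STALL_PROP_GT_ID;
 *	props[0].value = 0;
 *
 *	props[1].base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
 *	props[1].property = DRM_XE_EU_STALL_PROP_SAMPLE_RATE;
 *	props[1].value = sampling_rate;  // from query->sampling_rates[]
 *
 *	struct drm_xe_observation_param arg = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL,
 *		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *		.param = (uintptr_t)&props[0],
 *	};
 *
 *	int stream_fd = ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &arg);
 *	if (stream_fd >= 0)
 *		ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0);  // then read()
 */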
#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */