/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists.
 *	NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events to not be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event to not be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};
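
/*
 * Illustrative sketch (not part of the uAPI): attaching the chain built in
 * the example above to an ioctl argument. The struct and field names here
 * are hypothetical; real i915 extension points follow this shape, e.g. the
 * argument to DRM_IOCTL_I915_GEM_CREATE_EXT.
 *
 * .. code-block:: C
 *
 *	struct hypothetical_ioctl_arg {
 *		__u64 extensions;	// head of the chain, or zero
 *		// ... other ioctl parameters ...
 *	};
 *
 *	struct hypothetical_ioctl_arg arg = {
 *		.extensions = (uintptr_t)&ext1,	// kernel applies ext1, ext2, ext3
 *	};
 */
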
/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/**
 * enum drm_i915_gem_engine_class - uapi engine type enumeration
 *
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. This enum provides a classification of the role
 * of the engine, which may be used when requesting operations to be performed
 * on a certain subset of engines, or for providing information about that
 * group.
 */
enum drm_i915_gem_engine_class {
	/**
	 * @I915_ENGINE_CLASS_RENDER:
	 *
	 * Render engines support instructions used for 3D, Compute (GPGPU),
	 * and programmable media workloads. These instructions fetch data and
	 * dispatch individual work items to threads that operate in parallel.
	 * The threads run small programs (called "kernels" or "shaders") on
	 * the GPU's execution units (EUs).
	 */
	I915_ENGINE_CLASS_RENDER = 0,

	/**
	 * @I915_ENGINE_CLASS_COPY:
	 *
	 * Copy engines (also referred to as "blitters") support instructions
	 * that move blocks of data from one location in memory to another,
	 * or that fill a specified location of memory with fixed data.
	 * Copy engines can perform pre-defined logical or bitwise operations
	 * on the source, destination, or pattern data.
	 */
	I915_ENGINE_CLASS_COPY = 1,

	/**
	 * @I915_ENGINE_CLASS_VIDEO:
	 *
	 * Video engines (also referred to as "bit stream decode" (BSD) or
	 * "vdbox") support instructions that perform fixed-function media
	 * decode and encode.
	 */
	I915_ENGINE_CLASS_VIDEO = 2,

	/**
	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
	 *
	 * Video enhancement engines (also referred to as "vebox") support
	 * instructions related to image enhancement.
	 */
	I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,

	/**
	 * @I915_ENGINE_CLASS_COMPUTE:
	 *
	 * Compute engines support a subset of the instructions available
	 * on render engines: compute engines support Compute (GPGPU) and
	 * programmable media workloads, but do not support the 3D pipeline.
	 */
	I915_ENGINE_CLASS_COMPUTE = 4,

	/* Values in this enum should be kept compact. */

	/**
	 * @I915_ENGINE_CLASS_INVALID:
	 *
	 * Placeholder value to represent an invalid engine class assignment.
	 */
	I915_ENGINE_CLASS_INVALID = -1
};

/**
 * struct i915_engine_class_instance - Engine class/instance identifier
 *
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	/**
	 * @engine_class:
	 *
	 * Engine class from enum drm_i915_gem_engine_class
	 */
	__u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2

	/**
	 * @engine_instance:
	 *
	 * Engine instance.
	 */
	__u16 engine_instance;
};
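
/*
 * Illustrative sketch (not part of the uAPI): naming the second video
 * engine of a device. Interfaces that take a struct
 * i915_engine_class_instance match it against the engines exposed by the
 * kernel.
 *
 * .. code-block:: C
 *
 *	struct i915_engine_class_instance ci = {
 *		.engine_class = I915_ENGINE_CLASS_VIDEO,
 *		.engine_instance = 1,	// second vdbox, if present
 *	};
 */
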
/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 *
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
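
/*
 * Illustrative sketch (not part of the uAPI): counting render engine
 * busyness through perf. Assumes `i915_pmu_type` was read from the dynamic
 * PMU "type" file under the sysfs path in the DOC section above; error
 * handling omitted.
 *
 * .. code-block:: C
 *
 *	struct perf_event_attr attr = {};
 *	int fd;
 *
 *	attr.type = i915_pmu_type;	// dynamic PMU type from sysfs
 *	attr.size = sizeof(attr);
 *	attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
 *	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	// read(fd, ...) then yields the accumulated busy time
 */
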
/*
 * Top 4 bits of every non-engine counter are GT id.
 */
#define __I915_PMU_GT_SHIFT (60)

#define ___I915_PMU_OTHER(gt, x) \
	(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
	((__u64)(gt) << __I915_PMU_GT_SHIFT))

#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY

#define __I915_PMU_ACTUAL_FREQUENCY(gt)		___I915_PMU_OTHER(gt, 0)
#define __I915_PMU_REQUESTED_FREQUENCY(gt)	___I915_PMU_OTHER(gt, 1)
#define __I915_PMU_INTERRUPTS(gt)		___I915_PMU_OTHER(gt, 2)
#define __I915_PMU_RC6_RESIDENCY(gt)		___I915_PMU_OTHER(gt, 3)
#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt)	___I915_PMU_OTHER(gt, 4)

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1
#define I915_BOX_FLIP		0x2
#define I915_BOX_WAIT		0x4
#define I915_BOX_TEXTURE_LOAD	0x8
#define I915_BOX_LOST_CONTEXT	0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
#define DRM_I915_GEM_CREATE_EXT		0x3c
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING      8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD               10
#define I915_PARAM_HAS_BLT               11
#define I915_PARAM_HAS_RELAXED_FENCING   12
#define I915_PARAM_HAS_COHERENT_RINGS    13
#define I915_PARAM_HAS_EXEC_CONSTANTS    14
#define I915_PARAM_HAS_RELAXED_DELTA     15
#define I915_PARAM_HAS_GEN7_SOL_RESET    16
#define I915_PARAM_HAS_LLC               17
#define I915_PARAM_HAS_ALIASING_PPGTT    18
#define I915_PARAM_HAS_WAIT_TIMEOUT      19
#define I915_PARAM_HAS_SEMAPHORES        20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
#define I915_PARAM_HAS_VEBOX             22
#define I915_PARAM_HAS_SECURE_BATCHES    23
#define I915_PARAM_HAS_PINNED_BATCHES    24
#define I915_PARAM_HAS_EXEC_NO_RELOC     25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT                27
#define I915_PARAM_CMD_PARSER_VERSION    28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2              31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL        33
#define I915_PARAM_EU_TOTAL              34
#define I915_PARAM_HAS_GPU_RESET         35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN      37
#define I915_PARAM_HAS_POOLED_EU         38
#define I915_PARAM_MIN_EU_IN_POOL        39
#define I915_PARAM_MMAP_GTT_VERSION      40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask, nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1 Low priority
 * 0 Normal priority
 * 1 to 1k Highest priority
 */
#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
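
/*
 * Illustrative sketch (not part of the uAPI): testing a scheduler
 * capability bit. Assumes `fd` is an open i915 DRM fd; error handling
 * omitted.
 *
 * .. code-block:: C
 *
 *	int caps = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HAS_SCHEDULER,
 *		.value = &caps,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *	if (caps & I915_SCHEDULER_CAP_PRIORITY)
 *		; // context priorities will be respected
 */
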
/*
 * Query the status of HuC load.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *  -ENODEV if HuC is not present on this platform,
 *  -EOPNOTSUPP if HuC firmware usage is disabled,
 *  -ENOPKG if HuC firmware fetch failed,
 *  -ENOEXEC if HuC firmware is invalid or mismatched,
 *  -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
 *  -EIO if the FW transfer or the FW authentication failed.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of the
 * following values:
 *  * 0 if HuC firmware load is not complete,
 *  * 1 if HuC firmware is loaded and fully authenticated,
 *  * 2 if HuC firmware is loaded and authenticated for clear media only
 */
#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user-specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly disadvised.
 */
#define I915_PARAM_MMAP_GTT_COHERENT 52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56

/*
 * Frequency of the timestamps in OA reports. This used to be the same as the CS
 * timestamp frequency, but differs on some platforms.
 */
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57

/*
 * Query the status of PXP support in i915.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *     -ENODEV = PXP support is not available on the GPU device or in the
 *               kernel due to missing component drivers or kernel configs.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of
 * the following values:
 *     1 = PXP feature is supported and is ready for use.
 *     2 = PXP feature is supported but should be ready soon (pending
 *         initialization of non-i915 system dependencies).
 *
 * NOTE: When param is supported (positive return values), user space should
 *       still refer to the GEM PXP context-creation UAPI header specs to be
 *       aware of possible failure due to system state machine at the time.
 */
#define I915_PARAM_PXP_STATUS		 58

/*
 * Query if the kernel allows marking a context to send a Freq hint to SLPC.
 * This will enable use of the strategies allowed by the SLPC algorithm.
 */
#define I915_PARAM_HAS_CONTEXT_FREQ_HINT	59

/* Must be kept compact -- no holes and well documented */
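
/*
 * Illustrative sketch (not part of the uAPI): querying one of the
 * parameters above, here PXP readiness. Assumes `fd` is an open i915 DRM
 * fd; ioctl() failing with one of the errnos listed above means PXP is
 * unavailable.
 *
 * .. code-block:: C
 *
 *	int pxp = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PXP_STATUS,
 *		.value = &pxp,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && pxp == 1)
 *		; // PXP ready: protected contexts may be created
 */
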
/**
 * struct drm_i915_getparam - Driver parameter query structure.
 */
struct drm_i915_getparam {
	/** @param: Driver parameter to query. */
	__s32 param;

	/**
	 * @value: Address of memory where queried value should be put.
	 *
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
};

/**
 * typedef drm_i915_getparam_t - Driver parameter query structure.
 * See struct drm_i915_getparam.
 */
typedef struct drm_i915_getparam drm_i915_getparam_t;

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
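
/*
 * Illustrative sketch (not part of the uAPI): uploading data into a newly
 * created object, showing the fixed-size u64 encoding of a userspace
 * pointer. Assumes `fd` is an open i915 DRM fd and `data` points to `len`
 * bytes; error handling omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create create = { .size = len };
 *	struct drm_i915_gem_pwrite pwrite = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 *	pwrite.handle = create.handle;
 *	pwrite.offset = 0;
 *	pwrite.size = len;
 *	pwrite.data_ptr = (__u64)(uintptr_t)data;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */
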
*/ 965718dceddSDavid Howells __u32 handle; 966718dceddSDavid Howells __u32 pad; 967718dceddSDavid Howells /** Offset in the object to map. */ 968718dceddSDavid Howells __u64 offset; 969718dceddSDavid Howells /** 970718dceddSDavid Howells * Length of data to map. 971718dceddSDavid Howells * 972718dceddSDavid Howells * The value will be page-aligned. 973718dceddSDavid Howells */ 974718dceddSDavid Howells __u64 size; 975718dceddSDavid Howells /** 976718dceddSDavid Howells * Returned pointer the data was mapped at. 977718dceddSDavid Howells * 978718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 979718dceddSDavid Howells */ 980718dceddSDavid Howells __u64 addr_ptr; 9811816f923SAkash Goel 9821816f923SAkash Goel /** 9831816f923SAkash Goel * Flags for extended behaviour. 9841816f923SAkash Goel * 9851816f923SAkash Goel * Added in version 2. 9861816f923SAkash Goel */ 9871816f923SAkash Goel __u64 flags; 9881816f923SAkash Goel #define I915_MMAP_WC 0x1 989718dceddSDavid Howells }; 990718dceddSDavid Howells 991718dceddSDavid Howells struct drm_i915_gem_mmap_gtt { 992718dceddSDavid Howells /** Handle for the object being mapped. */ 993718dceddSDavid Howells __u32 handle; 994718dceddSDavid Howells __u32 pad; 995718dceddSDavid Howells /** 996718dceddSDavid Howells * Fake offset to use for subsequent mmap call 997718dceddSDavid Howells * 998718dceddSDavid Howells * This is a fixed-size type for 32/64 compatibility. 999718dceddSDavid Howells */ 1000718dceddSDavid Howells __u64 offset; 1001718dceddSDavid Howells }; 1002718dceddSDavid Howells 10037961c5b6SMaarten Lankhorst /** 10047961c5b6SMaarten Lankhorst * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object. 10057961c5b6SMaarten Lankhorst * 10067961c5b6SMaarten Lankhorst * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl, 10077961c5b6SMaarten Lankhorst * and is used to retrieve the fake offset to mmap an object specified by &handle. 10087961c5b6SMaarten Lankhorst * 10097961c5b6SMaarten Lankhorst * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+. 10107961c5b6SMaarten Lankhorst * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave 10117961c5b6SMaarten Lankhorst * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`. 10127961c5b6SMaarten Lankhorst */ 1013cc662126SAbdiel Janulgue struct drm_i915_gem_mmap_offset { 10147961c5b6SMaarten Lankhorst /** @handle: Handle for the object being mapped. */ 1015cc662126SAbdiel Janulgue __u32 handle; 10167961c5b6SMaarten Lankhorst /** @pad: Must be zero */ 1017cc662126SAbdiel Janulgue __u32 pad; 1018cc662126SAbdiel Janulgue /** 10197961c5b6SMaarten Lankhorst * @offset: The fake offset to use for subsequent mmap call 1020cc662126SAbdiel Janulgue * 1021cc662126SAbdiel Janulgue * This is a fixed-size type for 32/64 compatibility. 1022cc662126SAbdiel Janulgue */ 1023cc662126SAbdiel Janulgue __u64 offset; 1024cc662126SAbdiel Janulgue 1025cc662126SAbdiel Janulgue /** 10267961c5b6SMaarten Lankhorst * @flags: Flags for extended behaviour. 1027cc662126SAbdiel Janulgue * 10287961c5b6SMaarten Lankhorst * It is mandatory that one of the `MMAP_OFFSET` types 10297961c5b6SMaarten Lankhorst * should be included: 10307961c5b6SMaarten Lankhorst * 10317961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined) 10327961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching. 
10337961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
10347961c5b6SMaarten Lankhorst * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
10357961c5b6SMaarten Lankhorst *
10367961c5b6SMaarten Lankhorst * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
10377961c5b6SMaarten Lankhorst * type. On devices without local memory, this caching mode is invalid.
10387961c5b6SMaarten Lankhorst *
10397961c5b6SMaarten Lankhorst * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
10407961c5b6SMaarten Lankhorst * be used, depending on the object placement on creation. WB will be used
10417961c5b6SMaarten Lankhorst * when the object can only exist in system memory, WC otherwise.
1042cc662126SAbdiel Janulgue */
1043cc662126SAbdiel Janulgue __u64 flags;
10447961c5b6SMaarten Lankhorst
1045cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_GTT 0
1046cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WC 1
1047cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_WB 2
1048cc662126SAbdiel Janulgue #define I915_MMAP_OFFSET_UC 3
10497961c5b6SMaarten Lankhorst #define I915_MMAP_OFFSET_FIXED 4
1050cc662126SAbdiel Janulgue
10517961c5b6SMaarten Lankhorst /**
10527961c5b6SMaarten Lankhorst * @extensions: Zero-terminated chain of extensions.
1053cc662126SAbdiel Janulgue *
1054cc662126SAbdiel Janulgue * No current extensions defined; mbz.
1055cc662126SAbdiel Janulgue */
1056cc662126SAbdiel Janulgue __u64 extensions;
1057cc662126SAbdiel Janulgue };
1058cc662126SAbdiel Janulgue
10593aa8c57fSMatthew Auld /**
10603aa8c57fSMatthew Auld * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
10613aa8c57fSMatthew Auld * preparation for accessing the pages via some CPU domain.
10623aa8c57fSMatthew Auld *
10633aa8c57fSMatthew Auld * Specifying a new write or read domain will flush the object out of the
10643aa8c57fSMatthew Auld * previous domain (if required), before then updating the object's domain
10653aa8c57fSMatthew Auld * tracking with the new domain.
10663aa8c57fSMatthew Auld *
10673aa8c57fSMatthew Auld * Note this might involve waiting for the object first if it is still active on
10683aa8c57fSMatthew Auld * the GPU.
10693aa8c57fSMatthew Auld *
10703aa8c57fSMatthew Auld * Supported values for @read_domains and @write_domain:
10713aa8c57fSMatthew Auld *
10723aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
10733aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_CPU: CPU cache domain
10743aa8c57fSMatthew Auld * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
10753aa8c57fSMatthew Auld *
10763aa8c57fSMatthew Auld * All other domains are rejected.
107781340cf3SMatthew Auld *
107881340cf3SMatthew Auld * Note that for discrete, starting from DG1, this is no longer supported, and
107981340cf3SMatthew Auld * is instead rejected. On such platforms the CPU domain is effectively static,
108081340cf3SMatthew Auld * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
108181340cf3SMatthew Auld * which can't be set explicitly and instead depends on the object placements,
108281340cf3SMatthew Auld * as per the below.
108381340cf3SMatthew Auld * 108481340cf3SMatthew Auld * Implicit caching rules, starting from DG1: 108581340cf3SMatthew Auld * 108681340cf3SMatthew Auld * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions) 108781340cf3SMatthew Auld * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and 108881340cf3SMatthew Auld * mapped as write-combined only. 108981340cf3SMatthew Auld * 109081340cf3SMatthew Auld * - Everything else is always allocated and mapped as write-back, with the 109181340cf3SMatthew Auld * guarantee that everything is also coherent with the GPU. 109281340cf3SMatthew Auld * 109381340cf3SMatthew Auld * Note that this is likely to change in the future again, where we might need 109481340cf3SMatthew Auld * more flexibility on future devices, so making this all explicit as part of a 109581340cf3SMatthew Auld * new &drm_i915_gem_create_ext extension is probable. 10963aa8c57fSMatthew Auld */ 1097718dceddSDavid Howells struct drm_i915_gem_set_domain { 10983aa8c57fSMatthew Auld /** @handle: Handle for the object. */ 1099718dceddSDavid Howells __u32 handle; 1100718dceddSDavid Howells 11013aa8c57fSMatthew Auld /** @read_domains: New read domains. */ 1102718dceddSDavid Howells __u32 read_domains; 1103718dceddSDavid Howells 11043aa8c57fSMatthew Auld /** 11053aa8c57fSMatthew Auld * @write_domain: New write domain. 11063aa8c57fSMatthew Auld * 11073aa8c57fSMatthew Auld * Note that having something in the write domain implies it's in the 11083aa8c57fSMatthew Auld * read domain, and only that read domain. 11093aa8c57fSMatthew Auld */ 1110718dceddSDavid Howells __u32 write_domain; 1111718dceddSDavid Howells }; 1112718dceddSDavid Howells 1113718dceddSDavid Howells struct drm_i915_gem_sw_finish { 1114718dceddSDavid Howells /** Handle for the object */ 1115718dceddSDavid Howells __u32 handle; 1116718dceddSDavid Howells }; 1117718dceddSDavid Howells 1118718dceddSDavid Howells struct drm_i915_gem_relocation_entry { 1119718dceddSDavid Howells /** 1120718dceddSDavid Howells * Handle of the buffer being pointed to by this relocation entry. 1121718dceddSDavid Howells * 1122718dceddSDavid Howells * It's appealing to make this be an index into the mm_validate_entry 1123718dceddSDavid Howells * list to refer to the buffer, but this allows the driver to create 1124718dceddSDavid Howells * a relocation list for state buffers and not re-write it per 1125718dceddSDavid Howells * exec using the buffer. 1126718dceddSDavid Howells */ 1127718dceddSDavid Howells __u32 target_handle; 1128718dceddSDavid Howells 1129718dceddSDavid Howells /** 1130718dceddSDavid Howells * Value to be added to the offset of the target buffer to make up 1131718dceddSDavid Howells * the relocation entry. 1132718dceddSDavid Howells */ 1133718dceddSDavid Howells __u32 delta; 1134718dceddSDavid Howells 1135718dceddSDavid Howells /** Offset in the buffer the relocation entry will be written into */ 1136718dceddSDavid Howells __u64 offset; 1137718dceddSDavid Howells 1138718dceddSDavid Howells /** 1139718dceddSDavid Howells * Offset value of the target buffer that the relocation entry was last 1140718dceddSDavid Howells * written as. 1141718dceddSDavid Howells * 1142718dceddSDavid Howells * If the buffer has the same offset as last time, we can skip syncing 1143718dceddSDavid Howells * and writing the relocation. This value is written back out by 1144718dceddSDavid Howells * the execbuffer ioctl when the relocation is written. 
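 *
 * For illustration, a minimal sketch of how userspace fills this in
 * (here last_offset stands for whatever offset the kernel returned for
 * the target object on a previous execbuffer; it is not part of this
 * uAPI):
 *
 * .. code-block:: C
 *
 *     reloc.target_handle = target;
 *     reloc.offset = word_offset_in_buffer;
 *     reloc.delta = 0;
 *     reloc.presumed_offset = last_offset; // lets the kernel skip the
 *                                          // rewrite if nothing moved
 *     reloc.read_domains = I915_GEM_DOMAIN_RENDER;
 *     reloc.write_domain = 0;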
1145718dceddSDavid Howells */ 1146718dceddSDavid Howells __u64 presumed_offset; 1147718dceddSDavid Howells 1148718dceddSDavid Howells /** 1149718dceddSDavid Howells * Target memory domains read by this operation. 1150718dceddSDavid Howells */ 1151718dceddSDavid Howells __u32 read_domains; 1152718dceddSDavid Howells 1153718dceddSDavid Howells /** 1154718dceddSDavid Howells * Target memory domains written by this operation. 1155718dceddSDavid Howells * 1156718dceddSDavid Howells * Note that only one domain may be written by the whole 1157718dceddSDavid Howells * execbuffer operation, so that where there are conflicts, 1158718dceddSDavid Howells * the application will get -EINVAL back. 1159718dceddSDavid Howells */ 1160718dceddSDavid Howells __u32 write_domain; 1161718dceddSDavid Howells }; 1162718dceddSDavid Howells 1163718dceddSDavid Howells /** @{ 1164718dceddSDavid Howells * Intel memory domains 1165718dceddSDavid Howells * 1166718dceddSDavid Howells * Most of these just align with the various caches in 1167718dceddSDavid Howells * the system and are used to flush and invalidate as 1168718dceddSDavid Howells * objects end up cached in different domains. 1169718dceddSDavid Howells */ 1170718dceddSDavid Howells /** CPU cache */ 1171718dceddSDavid Howells #define I915_GEM_DOMAIN_CPU 0x00000001 1172718dceddSDavid Howells /** Render cache, used by 2D and 3D drawing */ 1173718dceddSDavid Howells #define I915_GEM_DOMAIN_RENDER 0x00000002 1174718dceddSDavid Howells /** Sampler cache, used by texture engine */ 1175718dceddSDavid Howells #define I915_GEM_DOMAIN_SAMPLER 0x00000004 1176718dceddSDavid Howells /** Command queue, used to load batch buffers */ 1177718dceddSDavid Howells #define I915_GEM_DOMAIN_COMMAND 0x00000008 1178718dceddSDavid Howells /** Instruction cache, used by shader programs */ 1179718dceddSDavid Howells #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 1180718dceddSDavid Howells /** Vertex address cache */ 1181718dceddSDavid Howells #define I915_GEM_DOMAIN_VERTEX 0x00000020 1182718dceddSDavid Howells /** GTT domain - aperture and scanout */ 1183718dceddSDavid Howells #define I915_GEM_DOMAIN_GTT 0x00000040 1184e22d8e3cSChris Wilson /** WC domain - uncached access */ 1185e22d8e3cSChris Wilson #define I915_GEM_DOMAIN_WC 0x00000080 1186718dceddSDavid Howells /** @} */ 1187718dceddSDavid Howells 1188718dceddSDavid Howells struct drm_i915_gem_exec_object { 1189718dceddSDavid Howells /** 1190718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this 1191718dceddSDavid Howells * operation. 1192718dceddSDavid Howells */ 1193718dceddSDavid Howells __u32 handle; 1194718dceddSDavid Howells 1195718dceddSDavid Howells /** Number of relocations to be performed on this buffer */ 1196718dceddSDavid Howells __u32 relocation_count; 1197718dceddSDavid Howells /** 1198718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing 1199718dceddSDavid Howells * the relocations to be performed in this buffer. 1200718dceddSDavid Howells */ 1201718dceddSDavid Howells __u64 relocs_ptr; 1202718dceddSDavid Howells 1203718dceddSDavid Howells /** Required alignment in graphics aperture */ 1204718dceddSDavid Howells __u64 alignment; 1205718dceddSDavid Howells 1206718dceddSDavid Howells /** 1207718dceddSDavid Howells * Returned value of the updated offset of the object, for future 1208718dceddSDavid Howells * presumed_offset writes. 
1209718dceddSDavid Howells */
1210718dceddSDavid Howells __u64 offset;
1211718dceddSDavid Howells };
1212718dceddSDavid Howells
1213b5b6f6a6SJason Ekstrand /* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
1214718dceddSDavid Howells struct drm_i915_gem_execbuffer {
1215718dceddSDavid Howells /**
1216718dceddSDavid Howells * List of buffers to be validated with their relocations to be
1217718dceddSDavid Howells * performed on them.
1218718dceddSDavid Howells *
1219718dceddSDavid Howells * This is a pointer to an array of struct drm_i915_gem_validate_entry.
1220718dceddSDavid Howells *
1221718dceddSDavid Howells * These buffers must be listed in an order such that all relocations
1222718dceddSDavid Howells * a buffer is performing refer to buffers that have already appeared
1223718dceddSDavid Howells * in the validate list.
1224718dceddSDavid Howells */
1225718dceddSDavid Howells __u64 buffers_ptr;
1226718dceddSDavid Howells __u32 buffer_count;
1227718dceddSDavid Howells
1228718dceddSDavid Howells /** Offset in the batchbuffer to start execution from. */
1229718dceddSDavid Howells __u32 batch_start_offset;
1230718dceddSDavid Howells /** Bytes used in batchbuffer from batch_start_offset */
1231718dceddSDavid Howells __u32 batch_len;
1232718dceddSDavid Howells __u32 DR1;
1233718dceddSDavid Howells __u32 DR4;
1234718dceddSDavid Howells __u32 num_cliprects;
1235718dceddSDavid Howells /** This is a struct drm_clip_rect *cliprects */
1236718dceddSDavid Howells __u64 cliprects_ptr;
1237718dceddSDavid Howells };
1238718dceddSDavid Howells
1239718dceddSDavid Howells struct drm_i915_gem_exec_object2 {
1240718dceddSDavid Howells /**
1241718dceddSDavid Howells * User's handle for a buffer to be bound into the GTT for this
1242718dceddSDavid Howells * operation.
1243718dceddSDavid Howells */
1244718dceddSDavid Howells __u32 handle;
1245718dceddSDavid Howells
1246718dceddSDavid Howells /** Number of relocations to be performed on this buffer */
1247718dceddSDavid Howells __u32 relocation_count;
1248718dceddSDavid Howells /**
1249718dceddSDavid Howells * Pointer to array of struct drm_i915_gem_relocation_entry containing
1250718dceddSDavid Howells * the relocations to be performed in this buffer.
1251718dceddSDavid Howells */
1252718dceddSDavid Howells __u64 relocs_ptr;
1253718dceddSDavid Howells
1254718dceddSDavid Howells /** Required alignment in graphics aperture */
1255718dceddSDavid Howells __u64 alignment;
1256718dceddSDavid Howells
1257718dceddSDavid Howells /**
1258506a8e87SChris Wilson * When the EXEC_OBJECT_PINNED flag is specified this is populated by
1259506a8e87SChris Wilson * the user with the GTT offset at which this object will be pinned.
1260caa574ffSMatthew Auld *
1261506a8e87SChris Wilson * When the I915_EXEC_NO_RELOC flag is specified this must contain the
1262506a8e87SChris Wilson * presumed_offset of the object.
1263caa574ffSMatthew Auld *
1264506a8e87SChris Wilson * During execbuffer2 the kernel populates it with the value of the
1265506a8e87SChris Wilson * current GTT offset of the object, for future presumed_offset writes.
1266caa574ffSMatthew Auld *
1267caa574ffSMatthew Auld * See struct drm_i915_gem_create_ext for the rules when dealing with
1268caa574ffSMatthew Auld * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
1269caa574ffSMatthew Auld * minimum page sizes, like DG2.
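 *
 * For illustration only, a minimal softpin sketch (the handle variable
 * and the address chosen here are made up, not part of this uAPI):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_exec_object2 obj = {};
 *
 *     obj.handle = handle;
 *     obj.offset = 0x100000; // desired GTT address
 *     obj.flags = EXEC_OBJECT_PINNED;
 *     // On success the object is bound at obj.offset; with
 *     // I915_EXEC_NO_RELOC the same field carries the presumed
 *     // offset into later execbuffer2 calls.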
1270718dceddSDavid Howells */
1271718dceddSDavid Howells __u64 offset;
1272718dceddSDavid Howells
1273718dceddSDavid Howells #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
1274ed5982e6SDaniel Vetter #define EXEC_OBJECT_NEEDS_GTT (1<<1)
1275ed5982e6SDaniel Vetter #define EXEC_OBJECT_WRITE (1<<2)
1276101b506aSMichel Thierry #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1277506a8e87SChris Wilson #define EXEC_OBJECT_PINNED (1<<4)
127891b2db6fSChris Wilson #define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
127977ae9957SChris Wilson /* The kernel implicitly tracks GPU activity on all GEM objects, and
128077ae9957SChris Wilson * synchronises operations with outstanding rendering. This includes
128177ae9957SChris Wilson * rendering on other devices if exported via dma-buf. However, sometimes
128277ae9957SChris Wilson * this tracking is too coarse and the user knows better. For example,
128377ae9957SChris Wilson * if the object is split into non-overlapping ranges shared between different
128477ae9957SChris Wilson * clients or engines (i.e. suballocating objects), the implicit tracking
128577ae9957SChris Wilson * by the kernel assumes that each operation affects the whole object rather
128677ae9957SChris Wilson * than an individual range, causing needless synchronisation between clients.
128777ae9957SChris Wilson * The kernel will also forgo any CPU cache flushes prior to rendering from
128877ae9957SChris Wilson * the object as the client is expected to be also handling such domain
128977ae9957SChris Wilson * tracking.
129077ae9957SChris Wilson *
129177ae9957SChris Wilson * The kernel maintains the implicit tracking in order to manage resources
129277ae9957SChris Wilson * used by the GPU - this flag only disables the synchronisation prior to
129377ae9957SChris Wilson * rendering with this object in this execbuf.
129477ae9957SChris Wilson *
129577ae9957SChris Wilson * Opting out of implicit synchronisation requires the user to do its own
129677ae9957SChris Wilson * explicit tracking to avoid rendering corruption. See, for example,
129777ae9957SChris Wilson * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
129877ae9957SChris Wilson */
129977ae9957SChris Wilson #define EXEC_OBJECT_ASYNC (1<<6)
1300b0fd47adSChris Wilson /* Request that the contents of this execobject be copied into the error
1301b0fd47adSChris Wilson * state upon a GPU hang involving this batch for post-mortem debugging.
1302b0fd47adSChris Wilson * These buffers are recorded in no particular order as "user" in
1303b0fd47adSChris Wilson * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1304b0fd47adSChris Wilson * if the kernel supports this flag.
1305b0fd47adSChris Wilson */
1306b0fd47adSChris Wilson #define EXEC_OBJECT_CAPTURE (1<<7)
13079e2793f6SDave Gordon /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1308b0fd47adSChris Wilson #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1309718dceddSDavid Howells __u64 flags;
1310ed5982e6SDaniel Vetter
131191b2db6fSChris Wilson union {
1312718dceddSDavid Howells __u64 rsvd1;
131391b2db6fSChris Wilson __u64 pad_to_size;
131491b2db6fSChris Wilson };
1315718dceddSDavid Howells __u64 rsvd2;
1316718dceddSDavid Howells };
1317718dceddSDavid Howells
1318cf6e7bacSJason Ekstrand /**
1319a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
1320a913bde8SNiranjana Vishwanathapura * ioctl.
1321a913bde8SNiranjana Vishwanathapura *
1322a913bde8SNiranjana Vishwanathapura * The request will wait for the input fence to signal before submission.
1323a913bde8SNiranjana Vishwanathapura *
1324a913bde8SNiranjana Vishwanathapura * The returned output fence will be signaled after the completion of the
1325a913bde8SNiranjana Vishwanathapura * request.
1326cf6e7bacSJason Ekstrand */
1327a913bde8SNiranjana Vishwanathapura struct drm_i915_gem_exec_fence {
1328a913bde8SNiranjana Vishwanathapura /** @handle: User's handle for a drm_syncobj to wait on or signal. */
1329cf6e7bacSJason Ekstrand __u32 handle;
1330cf6e7bacSJason Ekstrand
1331a913bde8SNiranjana Vishwanathapura /**
1332a913bde8SNiranjana Vishwanathapura * @flags: Supported flags are:
1333a913bde8SNiranjana Vishwanathapura *
1334a913bde8SNiranjana Vishwanathapura * I915_EXEC_FENCE_WAIT:
1335a913bde8SNiranjana Vishwanathapura * Wait for the input fence before request submission.
1336a913bde8SNiranjana Vishwanathapura *
1337a913bde8SNiranjana Vishwanathapura * I915_EXEC_FENCE_SIGNAL:
1338a913bde8SNiranjana Vishwanathapura * Return request completion fence as output
1339a913bde8SNiranjana Vishwanathapura */
1340a913bde8SNiranjana Vishwanathapura __u32 flags;
1341cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_WAIT (1<<0)
1342cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_SIGNAL (1<<1)
1343ebcaa1ffSTvrtko Ursulin #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
1344cf6e7bacSJason Ekstrand };
1345cf6e7bacSJason Ekstrand
1346a913bde8SNiranjana Vishwanathapura /**
1347a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
1348a913bde8SNiranjana Vishwanathapura * for execbuf ioctl.
1349a913bde8SNiranjana Vishwanathapura *
135013149e8bSLionel Landwerlin * This structure describes an array of drm_syncobj and associated points for
135113149e8bSLionel Landwerlin * timeline variants of drm_syncobj. It is invalid to append this structure to
135213149e8bSLionel Landwerlin * the execbuf if I915_EXEC_FENCE_ARRAY is set.
135313149e8bSLionel Landwerlin */
135413149e8bSLionel Landwerlin struct drm_i915_gem_execbuffer_ext_timeline_fences {
1355a913bde8SNiranjana Vishwanathapura #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
1356a913bde8SNiranjana Vishwanathapura /** @base: Extension link. See struct i915_user_extension. */
135713149e8bSLionel Landwerlin struct i915_user_extension base;
135813149e8bSLionel Landwerlin
135913149e8bSLionel Landwerlin /**
1360a913bde8SNiranjana Vishwanathapura * @fence_count: Number of elements in the @handles_ptr & @values_ptr
1361a913bde8SNiranjana Vishwanathapura * arrays.
136213149e8bSLionel Landwerlin */
136313149e8bSLionel Landwerlin __u64 fence_count;
136413149e8bSLionel Landwerlin
136513149e8bSLionel Landwerlin /**
1366a913bde8SNiranjana Vishwanathapura * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
1367a913bde8SNiranjana Vishwanathapura * of length @fence_count.
136813149e8bSLionel Landwerlin */
136913149e8bSLionel Landwerlin __u64 handles_ptr;
137013149e8bSLionel Landwerlin
137113149e8bSLionel Landwerlin /**
1372a913bde8SNiranjana Vishwanathapura * @values_ptr: Pointer to an array of u64 values of length
1373a913bde8SNiranjana Vishwanathapura * @fence_count.
1374a913bde8SNiranjana Vishwanathapura * Values must be 0 for a binary drm_syncobj. A value of 0 for a
1375a913bde8SNiranjana Vishwanathapura * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
1376a913bde8SNiranjana Vishwanathapura * binary one.
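 *
 * A minimal setup sketch, for illustration only (the syncobj handle and
 * the timeline point used here are made up):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_exec_fence fence = {
 *             .handle = syncobj_handle,
 *             .flags = I915_EXEC_FENCE_SIGNAL,
 *     };
 *     __u64 point = 3; // must be non-zero for a timeline syncobj
 *     struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *             .base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *             .fence_count = 1,
 *             .handles_ptr = (uintptr_t)&fence,
 *             .values_ptr = (uintptr_t)&point,
 *     };
 *     // Chained via execbuffer2.cliprects_ptr with
 *     // I915_EXEC_USE_EXTENSIONS set in execbuffer2.flags.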
137713149e8bSLionel Landwerlin */ 137813149e8bSLionel Landwerlin __u64 values_ptr; 1379cda9edd0SLionel Landwerlin }; 1380cda9edd0SLionel Landwerlin 1381718dceddSDavid Howells /** 1382a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2 1383a913bde8SNiranjana Vishwanathapura * ioctl. 1384718dceddSDavid Howells */ 1385a913bde8SNiranjana Vishwanathapura struct drm_i915_gem_execbuffer2 { 1386a913bde8SNiranjana Vishwanathapura /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */ 1387718dceddSDavid Howells __u64 buffers_ptr; 1388a913bde8SNiranjana Vishwanathapura 1389a913bde8SNiranjana Vishwanathapura /** @buffer_count: Number of elements in @buffers_ptr array */ 1390718dceddSDavid Howells __u32 buffer_count; 1391718dceddSDavid Howells 1392cf6e7bacSJason Ekstrand /** 1393a913bde8SNiranjana Vishwanathapura * @batch_start_offset: Offset in the batchbuffer to start execution 1394a913bde8SNiranjana Vishwanathapura * from. 1395a913bde8SNiranjana Vishwanathapura */ 1396a913bde8SNiranjana Vishwanathapura __u32 batch_start_offset; 1397a913bde8SNiranjana Vishwanathapura 1398a913bde8SNiranjana Vishwanathapura /** 1399a913bde8SNiranjana Vishwanathapura * @batch_len: Length in bytes of the batch buffer, starting from the 1400a913bde8SNiranjana Vishwanathapura * @batch_start_offset. If 0, length is assumed to be the batch buffer 1401a913bde8SNiranjana Vishwanathapura * object size. 1402a913bde8SNiranjana Vishwanathapura */ 1403a913bde8SNiranjana Vishwanathapura __u32 batch_len; 1404a913bde8SNiranjana Vishwanathapura 1405a913bde8SNiranjana Vishwanathapura /** @DR1: deprecated */ 1406a913bde8SNiranjana Vishwanathapura __u32 DR1; 1407a913bde8SNiranjana Vishwanathapura 1408a913bde8SNiranjana Vishwanathapura /** @DR4: deprecated */ 1409a913bde8SNiranjana Vishwanathapura __u32 DR4; 1410a913bde8SNiranjana Vishwanathapura 1411a913bde8SNiranjana Vishwanathapura /** @num_cliprects: See @cliprects_ptr */ 1412a913bde8SNiranjana Vishwanathapura __u32 num_cliprects; 1413a913bde8SNiranjana Vishwanathapura 1414a913bde8SNiranjana Vishwanathapura /** 1415a913bde8SNiranjana Vishwanathapura * @cliprects_ptr: Kernel clipping was a DRI1 misfeature. 1416a913bde8SNiranjana Vishwanathapura * 1417a913bde8SNiranjana Vishwanathapura * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or 1418a913bde8SNiranjana Vishwanathapura * I915_EXEC_USE_EXTENSIONS flags are not set. 1419cda9edd0SLionel Landwerlin * 1420cda9edd0SLionel Landwerlin * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array 1421a913bde8SNiranjana Vishwanathapura * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the 1422a913bde8SNiranjana Vishwanathapura * array. 1423cda9edd0SLionel Landwerlin * 1424cda9edd0SLionel Landwerlin * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a 1425a913bde8SNiranjana Vishwanathapura * single &i915_user_extension and num_cliprects is 0. 
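 *
 * For example, an illustrative sketch only (execbuf names the struct
 * drm_i915_gem_execbuffer2 being filled in, and the syncobj handle is
 * made up), with I915_EXEC_FENCE_ARRAY:
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_exec_fence fences[1] = {
 *             { .handle = syncobj_handle, .flags = I915_EXEC_FENCE_WAIT },
 *     };
 *
 *     execbuf.cliprects_ptr = (uintptr_t)fences;
 *     execbuf.num_cliprects = 1;
 *     execbuf.flags |= I915_EXEC_FENCE_ARRAY;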
1426cf6e7bacSJason Ekstrand */
1427718dceddSDavid Howells __u64 cliprects_ptr;
1428a913bde8SNiranjana Vishwanathapura
1429a913bde8SNiranjana Vishwanathapura /** @flags: Execbuf flags */
1430a913bde8SNiranjana Vishwanathapura __u64 flags;
1431d90c06d5SChris Wilson #define I915_EXEC_RING_MASK (0x3f)
1432718dceddSDavid Howells #define I915_EXEC_DEFAULT (0<<0)
1433718dceddSDavid Howells #define I915_EXEC_RENDER (1<<0)
1434718dceddSDavid Howells #define I915_EXEC_BSD (2<<0)
1435718dceddSDavid Howells #define I915_EXEC_BLT (3<<0)
143682f91b6eSXiang, Haihao #define I915_EXEC_VEBOX (4<<0)
1437718dceddSDavid Howells
1438718dceddSDavid Howells /* Used for switching the constants addressing mode on gen4+ RENDER ring.
1439718dceddSDavid Howells * Gen6+ only supports relative addressing to dynamic state (default) and
1440718dceddSDavid Howells * absolute addressing.
1441718dceddSDavid Howells *
1442718dceddSDavid Howells * These flags are ignored for the BSD and BLT rings.
1443718dceddSDavid Howells */
1444718dceddSDavid Howells #define I915_EXEC_CONSTANTS_MASK (3<<6)
1445718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1446718dceddSDavid Howells #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
1447718dceddSDavid Howells #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1448718dceddSDavid Howells
1449718dceddSDavid Howells /** Resets the SO write offset registers for transform feedback on gen7. */
1450718dceddSDavid Howells #define I915_EXEC_GEN7_SOL_RESET (1<<8)
1451718dceddSDavid Howells
1452c2fb7916SDaniel Vetter /** Request a privileged ("secure") batch buffer. Note only available for
1453c2fb7916SDaniel Vetter * DRM_ROOT_ONLY | DRM_MASTER processes.
1454c2fb7916SDaniel Vetter */
1455c2fb7916SDaniel Vetter #define I915_EXEC_SECURE (1<<9)
1456c2fb7916SDaniel Vetter
1457b45305fcSDaniel Vetter /** Inform the kernel that the batch is and will always be pinned. This
1458b45305fcSDaniel Vetter * negates the requirement for a workaround to be performed to avoid
1459b45305fcSDaniel Vetter * an incoherent CS (such as can be found on 830/845). If this flag is
1460b45305fcSDaniel Vetter * not passed, the kernel will endeavour to make sure the batch is
1461b45305fcSDaniel Vetter * coherent with the CS before execution. If this flag is passed,
1462b45305fcSDaniel Vetter * userspace assumes the responsibility for ensuring the same.
1463b45305fcSDaniel Vetter */
1464b45305fcSDaniel Vetter #define I915_EXEC_IS_PINNED (1<<10)
1465b45305fcSDaniel Vetter
1466c3d19d3cSGeert Uytterhoeven /** Provide a hint to the kernel that the command stream and auxiliary
1467ed5982e6SDaniel Vetter * state buffers already hold the correct presumed addresses and so the
1468ed5982e6SDaniel Vetter * relocation process may be skipped if no buffers need to be moved in
1469ed5982e6SDaniel Vetter * preparation for the execbuffer.
1470ed5982e6SDaniel Vetter */
1471ed5982e6SDaniel Vetter #define I915_EXEC_NO_RELOC (1<<11)
1472ed5982e6SDaniel Vetter
1473eef90ccbSChris Wilson /** Use the reloc.handle as an index into the exec object array rather
1474eef90ccbSChris Wilson * than as the per-file handle.
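 *
 * Illustrative sketch only (the slot number is made up): if the target
 * object sits at execobject[2], then with I915_EXEC_HANDLE_LUT set:
 *
 * .. code-block:: C
 *
 *     reloc.target_handle = 2; // index into execobject[], not a GEM handle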
1475eef90ccbSChris Wilson */
1476eef90ccbSChris Wilson #define I915_EXEC_HANDLE_LUT (1<<12)
1477eef90ccbSChris Wilson
14788d360dffSZhipeng Gong /** Used for switching BSD rings on the platforms with two BSD rings */
1479d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_SHIFT (13)
1480d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
1481d9da6aa0STvrtko Ursulin /* default ping-pong mode */
1482d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
1483d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
1484d9da6aa0STvrtko Ursulin #define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
14858d360dffSZhipeng Gong
1486a9ed33caSAbdiel Janulgue /** Tell the kernel that the batchbuffer is processed by
1487a9ed33caSAbdiel Janulgue * the resource streamer.
1488a9ed33caSAbdiel Janulgue */
1489a9ed33caSAbdiel Janulgue #define I915_EXEC_RESOURCE_STREAMER (1<<15)
1490a9ed33caSAbdiel Janulgue
1491fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1492fec0445cSChris Wilson * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1493fec0445cSChris Wilson * the batch.
1494fec0445cSChris Wilson *
1495fec0445cSChris Wilson * Returns -EINVAL if the sync_file fd cannot be found.
1496fec0445cSChris Wilson */
1497fec0445cSChris Wilson #define I915_EXEC_FENCE_IN (1<<16)
1498fec0445cSChris Wilson
1499fec0445cSChris Wilson /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1500fec0445cSChris Wilson * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1501fec0445cSChris Wilson * to the caller, and it should be closed after use. (The fd is a regular
1502fec0445cSChris Wilson * file descriptor and will be cleaned up on process termination. It holds
1503fec0445cSChris Wilson * a reference to the request, but nothing else.)
1504fec0445cSChris Wilson *
1505fec0445cSChris Wilson * The sync_file fd can be combined with other sync_file and passed either
1506fec0445cSChris Wilson * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1507fec0445cSChris Wilson * will only occur after this request completes), or to other devices.
1508fec0445cSChris Wilson *
1509fec0445cSChris Wilson * Using I915_EXEC_FENCE_OUT requires use of
1510fec0445cSChris Wilson * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1511fec0445cSChris Wilson * back to userspace. Failure to do so will cause the out-fence to always
1512fec0445cSChris Wilson * be reported as zero, and the real fence fd to be leaked.
1513fec0445cSChris Wilson */
1514fec0445cSChris Wilson #define I915_EXEC_FENCE_OUT (1<<17)
1515fec0445cSChris Wilson
15161a71cf2fSChris Wilson /*
15171a71cf2fSChris Wilson * Traditionally the execbuf ioctl has only considered the final element in
15181a71cf2fSChris Wilson * the execobject[] to be the executable batch. Often though, the client
15191a71cf2fSChris Wilson * will know the batch object prior to construction and being able to place
15201a71cf2fSChris Wilson * it into the execobject[] array first can simplify the relocation tracking.
15211a71cf2fSChris Wilson * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
15221a71cf2fSChris Wilson * execobject[] as the batch instead (the default is to use the last
15231a71cf2fSChris Wilson * element).
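 *
 * For illustration, a minimal sketch (exec_objects, batch and execbuf
 * are hypothetical client-side variables):
 *
 * .. code-block:: C
 *
 *     exec_objects[0] = batch; // batch placed first, not last
 *     execbuf.buffers_ptr = (uintptr_t)exec_objects;
 *     execbuf.flags |= I915_EXEC_BATCH_FIRST;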
15241a71cf2fSChris Wilson */
15251a71cf2fSChris Wilson #define I915_EXEC_BATCH_FIRST (1<<18)
1526cf6e7bacSJason Ekstrand
1527cf6e7bacSJason Ekstrand /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1528cf6e7bacSJason Ekstrand * define an array of drm_i915_gem_exec_fence structures which specify a set of
1529cf6e7bacSJason Ekstrand * dma fences to wait upon or signal.
1530cf6e7bacSJason Ekstrand */
1531cf6e7bacSJason Ekstrand #define I915_EXEC_FENCE_ARRAY (1<<19)
1532cf6e7bacSJason Ekstrand
1533a88b6e4cSChris Wilson /*
1534a88b6e4cSChris Wilson * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1535a88b6e4cSChris Wilson * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1536a88b6e4cSChris Wilson * the batch.
1537a88b6e4cSChris Wilson *
1538a88b6e4cSChris Wilson * Returns -EINVAL if the sync_file fd cannot be found.
1539a88b6e4cSChris Wilson */
1540a88b6e4cSChris Wilson #define I915_EXEC_FENCE_SUBMIT (1 << 20)
1541a88b6e4cSChris Wilson
1542cda9edd0SLionel Landwerlin /*
1543cda9edd0SLionel Landwerlin * Setting I915_EXEC_USE_EXTENSIONS implies that
1544cda9edd0SLionel Landwerlin * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
1545cda9edd0SLionel Landwerlin * list of i915_user_extension. Each i915_user_extension node is the base of a
1546cda9edd0SLionel Landwerlin * larger structure. The supported structures are listed in the
1547cda9edd0SLionel Landwerlin * drm_i915_gem_execbuffer_ext enum.
1548cda9edd0SLionel Landwerlin */
1549cda9edd0SLionel Landwerlin #define I915_EXEC_USE_EXTENSIONS (1 << 21)
1550cda9edd0SLionel Landwerlin #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1551ed5982e6SDaniel Vetter
1552a913bde8SNiranjana Vishwanathapura /** @rsvd1: Context id */
1553a913bde8SNiranjana Vishwanathapura __u64 rsvd1;
1554a913bde8SNiranjana Vishwanathapura
1555a913bde8SNiranjana Vishwanathapura /**
1556a913bde8SNiranjana Vishwanathapura * @rsvd2: in and out sync_file file descriptors.
1557a913bde8SNiranjana Vishwanathapura *
1558a913bde8SNiranjana Vishwanathapura * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
1559a913bde8SNiranjana Vishwanathapura * lower 32 bits of this field will have the in sync_file fd (input).
1560a913bde8SNiranjana Vishwanathapura *
1561a913bde8SNiranjana Vishwanathapura * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
1562a913bde8SNiranjana Vishwanathapura * field will have the out sync_file fd (output).
1563a913bde8SNiranjana Vishwanathapura */
1564a913bde8SNiranjana Vishwanathapura __u64 rsvd2;
1565a913bde8SNiranjana Vishwanathapura };
1566a913bde8SNiranjana Vishwanathapura
1567718dceddSDavid Howells #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
1568718dceddSDavid Howells #define i915_execbuffer2_set_context_id(eb2, context) \
1569718dceddSDavid Howells (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1570718dceddSDavid Howells #define i915_execbuffer2_get_context_id(eb2) \
1571718dceddSDavid Howells ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1572718dceddSDavid Howells
1573718dceddSDavid Howells struct drm_i915_gem_pin {
1574718dceddSDavid Howells /** Handle of the buffer to be pinned. */
1575718dceddSDavid Howells __u32 handle;
1576718dceddSDavid Howells __u32 pad;
1577718dceddSDavid Howells
1578718dceddSDavid Howells /** alignment required within the aperture */
1579718dceddSDavid Howells __u64 alignment;
1580718dceddSDavid Howells
1581718dceddSDavid Howells /** Returned GTT offset of the buffer.
*/
1582718dceddSDavid Howells __u64 offset;
1583718dceddSDavid Howells };
1584718dceddSDavid Howells
1585718dceddSDavid Howells struct drm_i915_gem_unpin {
1586718dceddSDavid Howells /** Handle of the buffer to be unpinned. */
1587718dceddSDavid Howells __u32 handle;
1588718dceddSDavid Howells __u32 pad;
1589718dceddSDavid Howells };
1590718dceddSDavid Howells
1591718dceddSDavid Howells struct drm_i915_gem_busy {
1592718dceddSDavid Howells /** Handle of the buffer to check for busy */
1593718dceddSDavid Howells __u32 handle;
1594718dceddSDavid Howells
1595426960beSChris Wilson /** Return busy status
1596426960beSChris Wilson *
1597426960beSChris Wilson * A return of 0 implies that the object is idle (after
1598426960beSChris Wilson * having flushed any pending activity), and a non-zero return that
1599426960beSChris Wilson * the object is still in-flight on the GPU. (The GPU has not yet
1600426960beSChris Wilson * signaled completion for all pending requests that reference the
16011255501dSChris Wilson * object.) An object is guaranteed to become idle eventually (so
16021255501dSChris Wilson * long as no new GPU commands are executed upon it). Due to the
16031255501dSChris Wilson * asynchronous nature of the hardware, an object reported
16041255501dSChris Wilson * as busy may become idle before the ioctl is completed.
16051255501dSChris Wilson *
16061255501dSChris Wilson * Furthermore, if the object is busy, which engine is busy is only
1607c8b50242SChris Wilson * provided as a guide and only indirectly by reporting its class
1608c8b50242SChris Wilson * (there may be more than one engine in each class). There are race
1609c8b50242SChris Wilson * conditions which prevent the report of which engines are busy from
1610c8b50242SChris Wilson * being always accurate. However, the converse is not true. If the
1611c8b50242SChris Wilson * object is idle, the result of the ioctl, that all engines are idle,
1612c8b50242SChris Wilson * is accurate.
1613426960beSChris Wilson *
1614426960beSChris Wilson * The returned dword is split into two fields to indicate both
1615afa5cf31SRandy Dunlap * the engine classes on which the object is being read, and the
1616c8b50242SChris Wilson * engine class on which it is currently being written (if any).
1617426960beSChris Wilson *
1618426960beSChris Wilson * The low word (bits 0:15) indicates if the object is being written
1619426960beSChris Wilson * to by any engine (there can only be one, as the GEM implicit
1620426960beSChris Wilson * synchronisation rules force writes to be serialised). Only the
1621c8b50242SChris Wilson * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1622c8b50242SChris Wilson * 1 not 0 etc) for the last write is reported.
1623426960beSChris Wilson *
1624c8b50242SChris Wilson * The high word (bits 16:31) is a bitmask of which engine classes
1625c8b50242SChris Wilson * are currently reading from the object. Multiple engines may be
1626426960beSChris Wilson * reading from the object simultaneously.
1627426960beSChris Wilson *
1628c8b50242SChris Wilson * The value of each engine class is the same as specified in the
1629c649432eSTvrtko Ursulin * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
1630c8b50242SChris Wilson * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1631c649432eSTvrtko Ursulin * Some hardware may have parallel execution engines, e.g.
multiple
1632c649432eSTvrtko Ursulin * media engines, which are mapped to the same class identifier and so
1633c649432eSTvrtko Ursulin * are not separately reported for busyness.
16341255501dSChris Wilson *
16351255501dSChris Wilson * Caveat emptor:
16361255501dSChris Wilson * Only the boolean result of this query is reliable; that is whether
16371255501dSChris Wilson * the object is idle or busy. The report of which engines are busy
16381255501dSChris Wilson * should be only used as a heuristic.
1639718dceddSDavid Howells */
1640718dceddSDavid Howells __u32 busy;
1641718dceddSDavid Howells };
1642718dceddSDavid Howells
164335c7ab42SDaniel Vetter /**
1644289f5a72SMatthew Auld * struct drm_i915_gem_caching - Set or get the caching for given object
1645289f5a72SMatthew Auld * handle.
164635c7ab42SDaniel Vetter *
1647289f5a72SMatthew Auld * Allow userspace to control the GTT caching bits for a given object when the
1648289f5a72SMatthew Auld * object is later mapped through the ppGTT (or GGTT on older platforms lacking
1649289f5a72SMatthew Auld * ppGTT support, or if the object is used for scanout). Note that this might
1650289f5a72SMatthew Auld * require unbinding the object from the GTT first, if its current caching value
1651289f5a72SMatthew Auld * doesn't match.
1652e7737b67SMatthew Auld *
1653e7737b67SMatthew Auld * Note that this all changes on discrete platforms, starting from DG1, the
1654e7737b67SMatthew Auld * set/get caching is no longer supported, and is now rejected. Instead the CPU
1655e7737b67SMatthew Auld * caching attributes (WB vs WC) will become an immutable creation time property
1656e7737b67SMatthew Auld * for the object, along with the GTT caching level. For now we don't expose any
1657e7737b67SMatthew Auld * new uAPI for this, instead on DG1 this is all implicit, although this largely
1658e7737b67SMatthew Auld * shouldn't matter since DG1 is coherent by default (without any way of
1659e7737b67SMatthew Auld * controlling it).
1660e7737b67SMatthew Auld *
1661e7737b67SMatthew Auld * Implicit caching rules, starting from DG1:
1662e7737b67SMatthew Auld *
1663e7737b67SMatthew Auld * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1664e7737b67SMatthew Auld * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1665e7737b67SMatthew Auld * mapped as write-combined only.
1666e7737b67SMatthew Auld *
1667e7737b67SMatthew Auld * - Everything else is always allocated and mapped as write-back, with the
1668e7737b67SMatthew Auld * guarantee that everything is also coherent with the GPU.
1669e7737b67SMatthew Auld *
1670e7737b67SMatthew Auld * Note that this is likely to change in the future again, where we might need
1671e7737b67SMatthew Auld * more flexibility on future devices, so making this all explicit as part of a
1672e7737b67SMatthew Auld * new &drm_i915_gem_create_ext extension is probable.
1673e7737b67SMatthew Auld *
1674e7737b67SMatthew Auld * Side note: Part of the reason for this is that changing the at-allocation-time CPU
1675e7737b67SMatthew Auld * caching attributes for the pages might be required (and is expensive) if we
1676e7737b67SMatthew Auld * need to then CPU map the pages later with different caching attributes. This
1677e7737b67SMatthew Auld * inconsistent caching behaviour, while supported on x86, is not universally
1678e7737b67SMatthew Auld * supported on other architectures.
So for simplicity we opt for setting 1679e7737b67SMatthew Auld * everything at creation time, whilst also making it immutable, on discrete 1680e7737b67SMatthew Auld * platforms. 168135c7ab42SDaniel Vetter */ 1682718dceddSDavid Howells struct drm_i915_gem_caching { 1683718dceddSDavid Howells /** 1684289f5a72SMatthew Auld * @handle: Handle of the buffer to set/get the caching level. 1685289f5a72SMatthew Auld */ 1686718dceddSDavid Howells __u32 handle; 1687718dceddSDavid Howells 1688718dceddSDavid Howells /** 1689289f5a72SMatthew Auld * @caching: The GTT caching level to apply or possible return value. 1690718dceddSDavid Howells * 1691289f5a72SMatthew Auld * The supported @caching values: 1692289f5a72SMatthew Auld * 1693289f5a72SMatthew Auld * I915_CACHING_NONE: 1694289f5a72SMatthew Auld * 1695289f5a72SMatthew Auld * GPU access is not coherent with CPU caches. Default for machines 1696289f5a72SMatthew Auld * without an LLC. This means manual flushing might be needed, if we 1697289f5a72SMatthew Auld * want GPU access to be coherent. 1698289f5a72SMatthew Auld * 1699289f5a72SMatthew Auld * I915_CACHING_CACHED: 1700289f5a72SMatthew Auld * 1701289f5a72SMatthew Auld * GPU access is coherent with CPU caches and furthermore the data is 1702289f5a72SMatthew Auld * cached in last-level caches shared between CPU cores and the GPU GT. 1703289f5a72SMatthew Auld * 1704289f5a72SMatthew Auld * I915_CACHING_DISPLAY: 1705289f5a72SMatthew Auld * 1706289f5a72SMatthew Auld * Special GPU caching mode which is coherent with the scanout engines. 1707289f5a72SMatthew Auld * Transparently falls back to I915_CACHING_NONE on platforms where no 1708289f5a72SMatthew Auld * special cache mode (like write-through or gfdt flushing) is 1709289f5a72SMatthew Auld * available. The kernel automatically sets this mode when using a 1710289f5a72SMatthew Auld * buffer as a scanout target. Userspace can manually set this mode to 1711289f5a72SMatthew Auld * avoid a costly stall and clflush in the hotpath of drawing the first 1712289f5a72SMatthew Auld * frame. 1713289f5a72SMatthew Auld */ 1714289f5a72SMatthew Auld #define I915_CACHING_NONE 0 1715289f5a72SMatthew Auld #define I915_CACHING_CACHED 1 1716289f5a72SMatthew Auld #define I915_CACHING_DISPLAY 2 1717718dceddSDavid Howells __u32 caching; 1718718dceddSDavid Howells }; 1719718dceddSDavid Howells 1720718dceddSDavid Howells #define I915_TILING_NONE 0 1721718dceddSDavid Howells #define I915_TILING_X 1 1722718dceddSDavid Howells #define I915_TILING_Y 2 1723ea673f17SMatt Roper /* 1724ea673f17SMatt Roper * Do not add new tiling types here. The I915_TILING_* values are for 1725ea673f17SMatt Roper * de-tiling fence registers that no longer exist on modern platforms. Although 1726ea673f17SMatt Roper * the hardware may support new types of tiling in general (e.g., Tile4), we 1727ea673f17SMatt Roper * do not need to add them to the uapi that is specific to now-defunct ioctls. 1728ea673f17SMatt Roper */ 1729deeb1519SChris Wilson #define I915_TILING_LAST I915_TILING_Y 1730718dceddSDavid Howells 1731718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_NONE 0 1732718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9 1 1733718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10 2 1734718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_11 3 1735718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_11 4 1736718dceddSDavid Howells /* Not seen by userland */ 1737718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_UNKNOWN 5 1738718dceddSDavid Howells /* Seen by userland. 
*/ 1739718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_17 6 1740718dceddSDavid Howells #define I915_BIT_6_SWIZZLE_9_10_17 7 1741718dceddSDavid Howells 1742718dceddSDavid Howells struct drm_i915_gem_set_tiling { 1743718dceddSDavid Howells /** Handle of the buffer to have its tiling state updated */ 1744718dceddSDavid Howells __u32 handle; 1745718dceddSDavid Howells 1746718dceddSDavid Howells /** 1747718dceddSDavid Howells * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1748718dceddSDavid Howells * I915_TILING_Y). 1749718dceddSDavid Howells * 1750718dceddSDavid Howells * This value is to be set on request, and will be updated by the 1751718dceddSDavid Howells * kernel on successful return with the actual chosen tiling layout. 1752718dceddSDavid Howells * 1753718dceddSDavid Howells * The tiling mode may be demoted to I915_TILING_NONE when the system 1754718dceddSDavid Howells * has bit 6 swizzling that can't be managed correctly by GEM. 1755718dceddSDavid Howells * 1756718dceddSDavid Howells * Buffer contents become undefined when changing tiling_mode. 1757718dceddSDavid Howells */ 1758718dceddSDavid Howells __u32 tiling_mode; 1759718dceddSDavid Howells 1760718dceddSDavid Howells /** 1761718dceddSDavid Howells * Stride in bytes for the object when in I915_TILING_X or 1762718dceddSDavid Howells * I915_TILING_Y. 1763718dceddSDavid Howells */ 1764718dceddSDavid Howells __u32 stride; 1765718dceddSDavid Howells 1766718dceddSDavid Howells /** 1767718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1768718dceddSDavid Howells * mmap mapping. 1769718dceddSDavid Howells */ 1770718dceddSDavid Howells __u32 swizzle_mode; 1771718dceddSDavid Howells }; 1772718dceddSDavid Howells 1773718dceddSDavid Howells struct drm_i915_gem_get_tiling { 1774718dceddSDavid Howells /** Handle of the buffer to get tiling state for. */ 1775718dceddSDavid Howells __u32 handle; 1776718dceddSDavid Howells 1777718dceddSDavid Howells /** 1778718dceddSDavid Howells * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1779718dceddSDavid Howells * I915_TILING_Y). 1780718dceddSDavid Howells */ 1781718dceddSDavid Howells __u32 tiling_mode; 1782718dceddSDavid Howells 1783718dceddSDavid Howells /** 1784718dceddSDavid Howells * Returned address bit 6 swizzling required for CPU access through 1785718dceddSDavid Howells * mmap mapping. 1786718dceddSDavid Howells */ 1787718dceddSDavid Howells __u32 swizzle_mode; 178870f2f5c7SChris Wilson 178970f2f5c7SChris Wilson /** 179070f2f5c7SChris Wilson * Returned address bit 6 swizzling required for CPU access through 179170f2f5c7SChris Wilson * mmap mapping whilst bound. 
179270f2f5c7SChris Wilson */ 179370f2f5c7SChris Wilson __u32 phys_swizzle_mode; 1794718dceddSDavid Howells }; 1795718dceddSDavid Howells 1796718dceddSDavid Howells struct drm_i915_gem_get_aperture { 1797718dceddSDavid Howells /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 1798718dceddSDavid Howells __u64 aper_size; 1799718dceddSDavid Howells 1800718dceddSDavid Howells /** 1801718dceddSDavid Howells * Available space in the aperture used by i915_gem_execbuffer, in 1802718dceddSDavid Howells * bytes 1803718dceddSDavid Howells */ 1804718dceddSDavid Howells __u64 aper_available_size; 1805718dceddSDavid Howells }; 1806718dceddSDavid Howells 1807718dceddSDavid Howells struct drm_i915_get_pipe_from_crtc_id { 1808718dceddSDavid Howells /** ID of CRTC being requested **/ 1809718dceddSDavid Howells __u32 crtc_id; 1810718dceddSDavid Howells 1811718dceddSDavid Howells /** pipe of requested CRTC **/ 1812718dceddSDavid Howells __u32 pipe; 1813718dceddSDavid Howells }; 1814718dceddSDavid Howells 1815718dceddSDavid Howells #define I915_MADV_WILLNEED 0 1816718dceddSDavid Howells #define I915_MADV_DONTNEED 1 1817718dceddSDavid Howells #define __I915_MADV_PURGED 2 /* internal state */ 1818718dceddSDavid Howells 1819718dceddSDavid Howells struct drm_i915_gem_madvise { 1820718dceddSDavid Howells /** Handle of the buffer to change the backing store advice */ 1821718dceddSDavid Howells __u32 handle; 1822718dceddSDavid Howells 1823718dceddSDavid Howells /* Advice: either the buffer will be needed again in the near future, 1824afa5cf31SRandy Dunlap * or won't be and could be discarded under memory pressure. 1825718dceddSDavid Howells */ 1826718dceddSDavid Howells __u32 madv; 1827718dceddSDavid Howells 1828718dceddSDavid Howells /** Whether the backing store still exists. 
*/
1829718dceddSDavid Howells __u32 retained;
1830718dceddSDavid Howells };
1831718dceddSDavid Howells
1832718dceddSDavid Howells /* flags */
1833718dceddSDavid Howells #define I915_OVERLAY_TYPE_MASK 0xff
1834718dceddSDavid Howells #define I915_OVERLAY_YUV_PLANAR 0x01
1835718dceddSDavid Howells #define I915_OVERLAY_YUV_PACKED 0x02
1836718dceddSDavid Howells #define I915_OVERLAY_RGB 0x03
1837718dceddSDavid Howells
1838718dceddSDavid Howells #define I915_OVERLAY_DEPTH_MASK 0xff00
1839718dceddSDavid Howells #define I915_OVERLAY_RGB24 0x1000
1840718dceddSDavid Howells #define I915_OVERLAY_RGB16 0x2000
1841718dceddSDavid Howells #define I915_OVERLAY_RGB15 0x3000
1842718dceddSDavid Howells #define I915_OVERLAY_YUV422 0x0100
1843718dceddSDavid Howells #define I915_OVERLAY_YUV411 0x0200
1844718dceddSDavid Howells #define I915_OVERLAY_YUV420 0x0300
1845718dceddSDavid Howells #define I915_OVERLAY_YUV410 0x0400
1846718dceddSDavid Howells
1847718dceddSDavid Howells #define I915_OVERLAY_SWAP_MASK 0xff0000
1848718dceddSDavid Howells #define I915_OVERLAY_NO_SWAP 0x000000
1849718dceddSDavid Howells #define I915_OVERLAY_UV_SWAP 0x010000
1850718dceddSDavid Howells #define I915_OVERLAY_Y_SWAP 0x020000
1851718dceddSDavid Howells #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1852718dceddSDavid Howells
1853718dceddSDavid Howells #define I915_OVERLAY_FLAGS_MASK 0xff000000
1854718dceddSDavid Howells #define I915_OVERLAY_ENABLE 0x01000000
1855718dceddSDavid Howells
1856718dceddSDavid Howells struct drm_intel_overlay_put_image {
1857718dceddSDavid Howells /* various flags and src format description */
1858718dceddSDavid Howells __u32 flags;
1859718dceddSDavid Howells /* source picture description */
1860718dceddSDavid Howells __u32 bo_handle;
1861718dceddSDavid Howells /* stride values and offsets are in bytes, buffer relative */
1862718dceddSDavid Howells __u16 stride_Y; /* stride for packed formats */
1863718dceddSDavid Howells __u16 stride_UV;
1864718dceddSDavid Howells __u32 offset_Y; /* offset for packed formats */
1865718dceddSDavid Howells __u32 offset_U;
1866718dceddSDavid Howells __u32 offset_V;
1867718dceddSDavid Howells /* in pixels */
1868718dceddSDavid Howells __u16 src_width;
1869718dceddSDavid Howells __u16 src_height;
1870718dceddSDavid Howells /* to compensate the scaling factors for partially covered surfaces */
1871718dceddSDavid Howells __u16 src_scan_width;
1872718dceddSDavid Howells __u16 src_scan_height;
1873718dceddSDavid Howells /* output crtc description */
1874718dceddSDavid Howells __u32 crtc_id;
1875718dceddSDavid Howells __u16 dst_x;
1876718dceddSDavid Howells __u16 dst_y;
1877718dceddSDavid Howells __u16 dst_width;
1878718dceddSDavid Howells __u16 dst_height;
1879718dceddSDavid Howells };
1880718dceddSDavid Howells
1881718dceddSDavid Howells /* flags */
1882718dceddSDavid Howells #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1883718dceddSDavid Howells #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1884ea9da4e4SChris Wilson #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
1885718dceddSDavid Howells struct drm_intel_overlay_attrs {
1886718dceddSDavid Howells __u32 flags;
1887718dceddSDavid Howells __u32 color_key;
1888718dceddSDavid Howells __s32 brightness;
1889718dceddSDavid Howells __u32 contrast;
1890718dceddSDavid Howells __u32 saturation;
1891718dceddSDavid Howells __u32 gamma0;
1892718dceddSDavid Howells __u32 gamma1;
1893718dceddSDavid Howells __u32 gamma2;
1894718dceddSDavid Howells __u32 gamma3;
1895718dceddSDavid Howells __u32 gamma4;
1896718dceddSDavid Howells __u32 gamma5;
1897718dceddSDavid Howells };
1898718dceddSDavid Howells
1899718dceddSDavid Howells /*
1900718dceddSDavid Howells * Intel sprite handling
1901718dceddSDavid Howells *
1902718dceddSDavid Howells * Color keying works with a min/mask/max tuple. Both source and destination
1903718dceddSDavid Howells * color keying is allowed.
1904718dceddSDavid Howells *
1905718dceddSDavid Howells * Source keying:
1906718dceddSDavid Howells * Sprite pixels within the min & max values, masked against the color channels
1907718dceddSDavid Howells * specified in the mask field, will be transparent. All other pixels will
1908718dceddSDavid Howells * be displayed on top of the primary plane. For RGB surfaces, only the min
1909718dceddSDavid Howells * and mask fields will be used; ranged compares are not allowed.
1910718dceddSDavid Howells *
1911718dceddSDavid Howells * Destination keying:
1912718dceddSDavid Howells * Primary plane pixels that match the min value, masked against the color
1913718dceddSDavid Howells * channels specified in the mask field, will be replaced by corresponding
1914718dceddSDavid Howells * pixels from the sprite plane.
1915718dceddSDavid Howells *
1916718dceddSDavid Howells * Note that source & destination keying are exclusive; only one can be
1917718dceddSDavid Howells * active on a given plane.
1918718dceddSDavid Howells */
1919718dceddSDavid Howells
19206ec5bd34SVille Syrjälä #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
19216ec5bd34SVille Syrjälä * flags==0 to disable colorkeying.
19226ec5bd34SVille Syrjälä */
1923718dceddSDavid Howells #define I915_SET_COLORKEY_DESTINATION (1<<1)
1924718dceddSDavid Howells #define I915_SET_COLORKEY_SOURCE (1<<2)
1925718dceddSDavid Howells struct drm_intel_sprite_colorkey {
1926718dceddSDavid Howells __u32 plane_id;
1927718dceddSDavid Howells __u32 min_value;
1928718dceddSDavid Howells __u32 channel_mask;
1929718dceddSDavid Howells __u32 max_value;
1930718dceddSDavid Howells __u32 flags;
1931718dceddSDavid Howells };
1932718dceddSDavid Howells
1933718dceddSDavid Howells struct drm_i915_gem_wait {
1934718dceddSDavid Howells /** Handle of BO we shall wait on */
1935718dceddSDavid Howells __u32 bo_handle;
1936718dceddSDavid Howells __u32 flags;
1937718dceddSDavid Howells /** Number of nanoseconds to wait. Returns time remaining. */
1938718dceddSDavid Howells __s64 timeout_ns;
1939718dceddSDavid Howells };
1940718dceddSDavid Howells
1941718dceddSDavid Howells struct drm_i915_gem_context_create {
1942b9171541SChris Wilson __u32 ctx_id; /* output: id of new context */
1943718dceddSDavid Howells __u32 pad;
1944718dceddSDavid Howells };
1945718dceddSDavid Howells
1946a913bde8SNiranjana Vishwanathapura /**
1947a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
1948a913bde8SNiranjana Vishwanathapura */
1949b9171541SChris Wilson struct drm_i915_gem_context_create_ext {
1950a913bde8SNiranjana Vishwanathapura /** @ctx_id: Id of the created context (output) */
1951a913bde8SNiranjana Vishwanathapura __u32 ctx_id;
1952a913bde8SNiranjana Vishwanathapura
1953a913bde8SNiranjana Vishwanathapura /**
1954a913bde8SNiranjana Vishwanathapura * @flags: Supported flags are:
1955a913bde8SNiranjana Vishwanathapura *
1956a913bde8SNiranjana Vishwanathapura * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
1957a913bde8SNiranjana Vishwanathapura *
1958a913bde8SNiranjana Vishwanathapura * Extensions may be appended to this structure and the driver must check
1959a913bde8SNiranjana Vishwanathapura * for those.
See @extensions.
1960a913bde8SNiranjana Vishwanathapura *
1961a913bde8SNiranjana Vishwanathapura * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
1962a913bde8SNiranjana Vishwanathapura *
1963a913bde8SNiranjana Vishwanathapura * The created context will have a single timeline.
1964a913bde8SNiranjana Vishwanathapura */
1965b9171541SChris Wilson __u32 flags;
1966b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
19678319f44cSChris Wilson #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
1968b9171541SChris Wilson #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
19698319f44cSChris Wilson (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1970a913bde8SNiranjana Vishwanathapura 
1971a913bde8SNiranjana Vishwanathapura /**
1972a913bde8SNiranjana Vishwanathapura * @extensions: Zero-terminated chain of extensions.
1973a913bde8SNiranjana Vishwanathapura *
1974a913bde8SNiranjana Vishwanathapura * I915_CONTEXT_CREATE_EXT_SETPARAM:
1975a913bde8SNiranjana Vishwanathapura * Context parameter to set or query during context creation.
1976a913bde8SNiranjana Vishwanathapura * See struct drm_i915_gem_context_create_ext_setparam.
1977a913bde8SNiranjana Vishwanathapura *
1978a913bde8SNiranjana Vishwanathapura * I915_CONTEXT_CREATE_EXT_CLONE:
1979a913bde8SNiranjana Vishwanathapura * This extension has been removed. On the off chance someone somewhere
1980a913bde8SNiranjana Vishwanathapura * has attempted to use it, never re-use this extension number.
1981a913bde8SNiranjana Vishwanathapura */
1982e0695db7SChris Wilson __u64 extensions;
1983a913bde8SNiranjana Vishwanathapura #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1984a913bde8SNiranjana Vishwanathapura #define I915_CONTEXT_CREATE_EXT_CLONE 1
19855cc9ed4bSChris Wilson };
19865cc9ed4bSChris Wilson 
1987a913bde8SNiranjana Vishwanathapura /**
1988a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_context_param - Context parameter to set or query.
1989a913bde8SNiranjana Vishwanathapura */
1990c9dc0f35SChris Wilson struct drm_i915_gem_context_param {
1991a913bde8SNiranjana Vishwanathapura /** @ctx_id: Context id */
1992c9dc0f35SChris Wilson __u32 ctx_id;
1993a913bde8SNiranjana Vishwanathapura 
1994a913bde8SNiranjana Vishwanathapura /** @size: Size of the parameter @value */
1995c9dc0f35SChris Wilson __u32 size;
1996a913bde8SNiranjana Vishwanathapura 
1997a913bde8SNiranjana Vishwanathapura /** @param: Parameter to set or query */
1998c9dc0f35SChris Wilson __u64 param;
1999c9dc0f35SChris Wilson #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
20006ff6d61dSJason Ekstrand /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
20016ff6d61dSJason Ekstrand * someone somewhere has attempted to use it, never re-use this context
20026ff6d61dSJason Ekstrand * param number.
20036ff6d61dSJason Ekstrand */
2004b1b38278SDavid Weinehall #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
2005fa8848f2SChris Wilson #define I915_CONTEXT_PARAM_GTT_SIZE 0x3
2006bc3d6744SChris Wilson #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
200784102171SMika Kuoppala #define I915_CONTEXT_PARAM_BANNABLE 0x5
2008ac14fbd4SChris Wilson #define I915_CONTEXT_PARAM_PRIORITY 0x6
2009ac14fbd4SChris Wilson #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
2010ac14fbd4SChris Wilson #define I915_CONTEXT_DEFAULT_PRIORITY 0
2011ac14fbd4SChris Wilson #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
2012e46c2e99STvrtko Ursulin /*
2013e46c2e99STvrtko Ursulin * When using the following param, value should be a pointer to
2014e46c2e99STvrtko Ursulin * drm_i915_gem_context_param_sseu.
2015e46c2e99STvrtko Ursulin */
2016e46c2e99STvrtko Ursulin #define I915_CONTEXT_PARAM_SSEU 0x7
2017ba4fda62SChris Wilson 
2018ba4fda62SChris Wilson /*
2019ba4fda62SChris Wilson * Not all clients may want to attempt automatic recovery of a context after
2020ba4fda62SChris Wilson * a hang (for example, some clients may only submit very small incremental
2021ba4fda62SChris Wilson * batches relying on known logical state of previous batches which will never
2022ba4fda62SChris Wilson * recover correctly and each attempt will hang), and so would prefer that
2023ba4fda62SChris Wilson * the context is forever banned instead.
2024ba4fda62SChris Wilson *
2025ba4fda62SChris Wilson * If set to false (0), after a reset, subsequent (and in flight) rendering
2026ba4fda62SChris Wilson * from this context is discarded, and the client will need to create a new
2027ba4fda62SChris Wilson * context to use instead.
2028ba4fda62SChris Wilson *
2029ba4fda62SChris Wilson * If set to true (1), the kernel will automatically attempt to recover the
2030ba4fda62SChris Wilson * context by skipping the hanging batch and executing the next batch starting
2031ba4fda62SChris Wilson * from the default context state (discarding the incomplete logical context
2032ba4fda62SChris Wilson * state lost due to the reset).
2033ba4fda62SChris Wilson *
2034ba4fda62SChris Wilson * On creation, all new contexts are marked as recoverable.
2035ba4fda62SChris Wilson */
2036ba4fda62SChris Wilson #define I915_CONTEXT_PARAM_RECOVERABLE 0x8
20377f3f317aSChris Wilson 
20387f3f317aSChris Wilson /*
20397f3f317aSChris Wilson * The id of the associated virtual memory address space (ppGTT) of
20407f3f317aSChris Wilson * this context. Can be retrieved and passed to another context
20417f3f317aSChris Wilson * (on the same fd) for both to use the same ppGTT and so share
20427f3f317aSChris Wilson * address layouts, and avoid reloading the page tables on context
20437f3f317aSChris Wilson * switches between themselves.
20447f3f317aSChris Wilson *
20457f3f317aSChris Wilson * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
20467f3f317aSChris Wilson */
20477f3f317aSChris Wilson #define I915_CONTEXT_PARAM_VM 0x9
2048976b55f0SChris Wilson 
2049976b55f0SChris Wilson /*
2050976b55f0SChris Wilson * I915_CONTEXT_PARAM_ENGINES:
2051976b55f0SChris Wilson *
2052976b55f0SChris Wilson * Bind this context to operate on this subset of available engines. Henceforth,
2053976b55f0SChris Wilson * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
2054976b55f0SChris Wilson * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
2055976b55f0SChris Wilson * and upwards. Slots 0...N are filled in using the specified (class, instance).
2056976b55f0SChris Wilson * Use
2057976b55f0SChris Wilson * engine_class: I915_ENGINE_CLASS_INVALID,
2058976b55f0SChris Wilson * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
2059976b55f0SChris Wilson * to specify a gap in the array that can be filled in later, e.g. by a
2060976b55f0SChris Wilson * virtual engine used for load balancing.
2061976b55f0SChris Wilson *
2062976b55f0SChris Wilson * Setting the number of engines bound to the context to 0, by passing a
2063976b55f0SChris Wilson * zero-sized argument, will revert to the default settings.
2064976b55f0SChris Wilson *
2065976b55f0SChris Wilson * See struct i915_context_param_engines.
2066ee113690SChris Wilson *
2067ee113690SChris Wilson * Extensions:
2068ee113690SChris Wilson * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
2069ee113690SChris Wilson * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
2070e5e32171SMatthew Brost * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
2071976b55f0SChris Wilson */
2072976b55f0SChris Wilson #define I915_CONTEXT_PARAM_ENGINES 0xa
2073a0e04715SChris Wilson 
2074a0e04715SChris Wilson /*
2075a0e04715SChris Wilson * I915_CONTEXT_PARAM_PERSISTENCE:
2076a0e04715SChris Wilson *
2077a0e04715SChris Wilson * Allow the context and active rendering to survive the process until
2078a0e04715SChris Wilson * completion. Persistence allows fire-and-forget clients to queue up a
2079a0e04715SChris Wilson * bunch of work, hand the output over to a display server and then quit.
2080a0e04715SChris Wilson * If the context is marked as not persistent, upon closing (either via
2081a0e04715SChris Wilson * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
2082a0e04715SChris Wilson * or process termination), the context and any outstanding requests will be
2083a0e04715SChris Wilson * cancelled (and exported fences for cancelled requests marked as -EIO).
2084a0e04715SChris Wilson *
2085a0e04715SChris Wilson * By default, new contexts allow persistence.
2086a0e04715SChris Wilson */
2087a0e04715SChris Wilson #define I915_CONTEXT_PARAM_PERSISTENCE 0xb
208888be76cdSChris Wilson 
2089fe4751c3SJason Ekstrand /* This API has been removed. On the off chance someone somewhere has
2090fe4751c3SJason Ekstrand * attempted to use it, never re-use this context param number.
209188be76cdSChris Wilson */
209288be76cdSChris Wilson #define I915_CONTEXT_PARAM_RINGSIZE 0xc
2093d3ac8d42SDaniele Ceraolo Spurio 
2094d3ac8d42SDaniele Ceraolo Spurio /*
2095d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2096d3ac8d42SDaniele Ceraolo Spurio *
2097d3ac8d42SDaniele Ceraolo Spurio * Mark that the context makes use of protected content, which will result
2098d3ac8d42SDaniele Ceraolo Spurio * in the context being invalidated when the protected content session is
2099d3ac8d42SDaniele Ceraolo Spurio * invalidated. Given that the protected content session is killed on suspend,
2100d3ac8d42SDaniele Ceraolo Spurio * the device is kept awake for the lifetime of a protected context, so the
2101d3ac8d42SDaniele Ceraolo Spurio * user should make sure to dispose of such contexts once done.
2102d3ac8d42SDaniele Ceraolo Spurio * This flag can only be set at context creation time and, when set to true,
2103d3ac8d42SDaniele Ceraolo Spurio * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
2104d3ac8d42SDaniele Ceraolo Spurio * to false.
This flag can't be set to true in conjunction with setting the
2105d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
2106d3ac8d42SDaniele Ceraolo Spurio *
2107d3ac8d42SDaniele Ceraolo Spurio * .. code-block:: C
2108d3ac8d42SDaniele Ceraolo Spurio *
2109d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_context_create_ext_setparam p_protected = {
2110d3ac8d42SDaniele Ceraolo Spurio * .base = {
2111d3ac8d42SDaniele Ceraolo Spurio * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2112d3ac8d42SDaniele Ceraolo Spurio * },
2113d3ac8d42SDaniele Ceraolo Spurio * .param = {
2114d3ac8d42SDaniele Ceraolo Spurio * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
2115d3ac8d42SDaniele Ceraolo Spurio * .value = 1,
2116d3ac8d42SDaniele Ceraolo Spurio * }
2117d3ac8d42SDaniele Ceraolo Spurio * };
2118d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
2119d3ac8d42SDaniele Ceraolo Spurio * .base = {
2120d3ac8d42SDaniele Ceraolo Spurio * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2121d3ac8d42SDaniele Ceraolo Spurio * .next_extension = to_user_pointer(&p_protected),
2122d3ac8d42SDaniele Ceraolo Spurio * },
2123d3ac8d42SDaniele Ceraolo Spurio * .param = {
2124d3ac8d42SDaniele Ceraolo Spurio * .param = I915_CONTEXT_PARAM_RECOVERABLE,
2125d3ac8d42SDaniele Ceraolo Spurio * .value = 0,
2126d3ac8d42SDaniele Ceraolo Spurio * }
2127d3ac8d42SDaniele Ceraolo Spurio * };
2128d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_context_create_ext create = {
2129d3ac8d42SDaniele Ceraolo Spurio * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2130d3ac8d42SDaniele Ceraolo Spurio * .extensions = to_user_pointer(&p_norecover),
2131d3ac8d42SDaniele Ceraolo Spurio * };
2132d3ac8d42SDaniele Ceraolo Spurio *
2133d3ac8d42SDaniele Ceraolo Spurio * ctx_id = gem_context_create_ext(drm_fd, &create);
2134d3ac8d42SDaniele Ceraolo Spurio *
2135d3ac8d42SDaniele Ceraolo Spurio * In addition to the normal failure cases, setting this flag during context
2136d3ac8d42SDaniele Ceraolo Spurio * creation can result in the following errors:
2137d3ac8d42SDaniele Ceraolo Spurio *
2138d3ac8d42SDaniele Ceraolo Spurio * -ENODEV: feature not available
2139d3ac8d42SDaniele Ceraolo Spurio * -EPERM: trying to mark a recoverable or non-bannable context as protected
214099afb7ccSAlan Previn * -ENXIO: A dependency such as a component driver or firmware is not yet
214199afb7ccSAlan Previn * loaded, so user space may need to try again.
Depending on the
214299afb7ccSAlan Previn * device, this error may be reported if protected context creation is
214399afb7ccSAlan Previn * attempted very early after kernel start because the internal timeout
214499afb7ccSAlan Previn * waiting for such dependencies is not guaranteed to be larger than
214599afb7ccSAlan Previn * required (numbers differ depending on system and kernel config):
214699afb7ccSAlan Previn * - ADL/RPL: dependencies may take up to 3 seconds from kernel start
214799afb7ccSAlan Previn * while context creation internal timeout is 250 milliseconds
214899afb7ccSAlan Previn * - MTL: dependencies may take up to 8 seconds from kernel start
214999afb7ccSAlan Previn * while context creation internal timeout is 250 milliseconds
215099afb7ccSAlan Previn * NOTE: such dependencies happen once, so a subsequent call to create a
215199afb7ccSAlan Previn * protected context after a prior successful call will not experience
215299afb7ccSAlan Previn * such timeouts and will not return -ENXIO (unless the driver is reloaded,
215399afb7ccSAlan Previn * or, depending on the device, resumes from a suspended state).
215499afb7ccSAlan Previn * -EIO: The firmware did not succeed in creating the protected context.
2155d3ac8d42SDaniele Ceraolo Spurio */
2156d3ac8d42SDaniele Ceraolo Spurio #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
2157cec82816SVinay Belgaumkar 
2158cec82816SVinay Belgaumkar /*
2159cec82816SVinay Belgaumkar * I915_CONTEXT_PARAM_LOW_LATENCY:
2160cec82816SVinay Belgaumkar *
2161cec82816SVinay Belgaumkar * Mark this context as a low latency workload which requires aggressive GT
2162cec82816SVinay Belgaumkar * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
2163cec82816SVinay Belgaumkar * supports this per-context flag.
2164cec82816SVinay Belgaumkar */
2165cec82816SVinay Belgaumkar #define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
2166*0f1bb41bSTvrtko Ursulin 
2167*0f1bb41bSTvrtko Ursulin /*
2168*0f1bb41bSTvrtko Ursulin * I915_CONTEXT_PARAM_CONTEXT_IMAGE:
2169*0f1bb41bSTvrtko Ursulin *
2170*0f1bb41bSTvrtko Ursulin * Allows userspace to provide its own context images.
2171*0f1bb41bSTvrtko Ursulin *
2172*0f1bb41bSTvrtko Ursulin * Note that this is a debug API not available on production kernel builds.
2173*0f1bb41bSTvrtko Ursulin */
2174*0f1bb41bSTvrtko Ursulin #define I915_CONTEXT_PARAM_CONTEXT_IMAGE 0xf
2175be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
2176e0695db7SChris Wilson 
2177a913bde8SNiranjana Vishwanathapura /** @value: Context parameter value to be set or queried */
2178c9dc0f35SChris Wilson __u64 value;
2179c9dc0f35SChris Wilson };
2180c9dc0f35SChris Wilson 
21812ef6a01fSMatthew Auld /*
2182e46c2e99STvrtko Ursulin * Context SSEU programming
2183e46c2e99STvrtko Ursulin *
2184e46c2e99STvrtko Ursulin * It may be necessary for either functional or performance reasons to configure
2185e46c2e99STvrtko Ursulin * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
2186e46c2e99STvrtko Ursulin * Sub-slice/EU).
2187e46c2e99STvrtko Ursulin *
2188e46c2e99STvrtko Ursulin * This is done by applying an SSEU configuration, using the below
2189e46c2e99STvrtko Ursulin * @struct drm_i915_gem_context_param_sseu, for every supported engine which
2190e46c2e99STvrtko Ursulin * userspace intends to use.
2191e46c2e99STvrtko Ursulin *
2192e46c2e99STvrtko Ursulin * Not all GPUs or engines support this functionality, in which case an error
2193e46c2e99STvrtko Ursulin * code of -ENODEV will be returned.
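 *
 * As an illustration, a minimal sketch of restricting a context to a single
 * slice of its render engine (here fd and ctx_id are assumed to be an open
 * DRM file descriptor and an existing context; error handling is omitted):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_context_param_sseu sseu = {
 *             .engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *     };
 *     struct drm_i915_gem_context_param arg = {
 *             .ctx_id = ctx_id,
 *             .param = I915_CONTEXT_PARAM_SSEU,
 *             .size = sizeof(sseu),
 *             .value = (uintptr_t)&sseu,
 *     };
 *
 *     // Query the current configuration for the engine...
 *     ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *
 *     // ...then request a single slice for this context.
 *     sseu.slice_mask = 0x1;
 *     ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);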
2194e46c2e99STvrtko Ursulin *
2195e46c2e99STvrtko Ursulin * Also, the flexibility of possible SSEU configuration permutations varies
2196e46c2e99STvrtko Ursulin * between GPU generations and is subject to software-imposed limitations.
2197e46c2e99STvrtko Ursulin * Requesting an unsupported combination will return an error code of -EINVAL.
2198e46c2e99STvrtko Ursulin *
2199e46c2e99STvrtko Ursulin * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
2200e46c2e99STvrtko Ursulin * favour of a single global setting.
2201e46c2e99STvrtko Ursulin */
2202e46c2e99STvrtko Ursulin struct drm_i915_gem_context_param_sseu {
2203e46c2e99STvrtko Ursulin /*
2204e46c2e99STvrtko Ursulin * Engine class & instance to be configured or queried.
2205e46c2e99STvrtko Ursulin */
2206d1172ab3SChris Wilson struct i915_engine_class_instance engine;
2207e46c2e99STvrtko Ursulin 
2208e46c2e99STvrtko Ursulin /*
2209e620f7b3SChris Wilson * Unknown flags must be cleared to zero.
2210e46c2e99STvrtko Ursulin */
2211e46c2e99STvrtko Ursulin __u32 flags;
2212e620f7b3SChris Wilson #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
2213e46c2e99STvrtko Ursulin 
2214e46c2e99STvrtko Ursulin /*
2215e46c2e99STvrtko Ursulin * Mask of slices to enable for the context. Valid values are a subset
2216e46c2e99STvrtko Ursulin * of the bitmask value returned for I915_PARAM_SLICE_MASK.
2217e46c2e99STvrtko Ursulin */
2218e46c2e99STvrtko Ursulin __u64 slice_mask;
2219e46c2e99STvrtko Ursulin 
2220e46c2e99STvrtko Ursulin /*
2221e46c2e99STvrtko Ursulin * Mask of subslices to enable for the context. Valid values are a
2222e46c2e99STvrtko Ursulin * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
2223e46c2e99STvrtko Ursulin */
2224e46c2e99STvrtko Ursulin __u64 subslice_mask;
2225e46c2e99STvrtko Ursulin 
2226e46c2e99STvrtko Ursulin /*
2227e46c2e99STvrtko Ursulin * Minimum/Maximum number of EUs to enable per subslice for the
2228e46c2e99STvrtko Ursulin * context. min_eus_per_subslice must be less than or equal to
2229e46c2e99STvrtko Ursulin * max_eus_per_subslice.
2230e46c2e99STvrtko Ursulin */
2231e46c2e99STvrtko Ursulin __u16 min_eus_per_subslice;
2232e46c2e99STvrtko Ursulin __u16 max_eus_per_subslice;
2233e46c2e99STvrtko Ursulin 
2234e46c2e99STvrtko Ursulin /*
2235e46c2e99STvrtko Ursulin * Unused for now. Must be cleared to zero.
2236e46c2e99STvrtko Ursulin */
2237e46c2e99STvrtko Ursulin __u32 rsvd;
2238e46c2e99STvrtko Ursulin };
2239e46c2e99STvrtko Ursulin 
224057772953STvrtko Ursulin /**
224157772953STvrtko Ursulin * DOC: Virtual Engine uAPI
224257772953STvrtko Ursulin *
224357772953STvrtko Ursulin * Virtual engine is a concept where userspace is able to configure a set of
224457772953STvrtko Ursulin * physical engines, submit a batch buffer, and let the driver execute it on any
224557772953STvrtko Ursulin * engine from the set as it sees fit.
224657772953STvrtko Ursulin *
224757772953STvrtko Ursulin * This is primarily useful on parts which have multiple instances of the same
224857772953STvrtko Ursulin * class of engine, like for example GT3+ Skylake parts with their two VCS engines.
224957772953STvrtko Ursulin *
225057772953STvrtko Ursulin * For instance, userspace can enumerate all engines of a certain class using the
225157772953STvrtko Ursulin * previously described `Engine Discovery uAPI`_.
After that userspace can
225257772953STvrtko Ursulin * create a GEM context with a placeholder slot for the virtual engine (using
225357772953STvrtko Ursulin * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
225457772953STvrtko Ursulin * and instance respectively) and finally, using the
225557772953STvrtko Ursulin * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension, place a virtual engine in
225657772953STvrtko Ursulin * the same reserved slot.
225757772953STvrtko Ursulin *
225857772953STvrtko Ursulin * Example of creating a virtual engine and submitting a batch buffer to it:
225957772953STvrtko Ursulin *
226057772953STvrtko Ursulin * .. code-block:: C
226157772953STvrtko Ursulin *
226257772953STvrtko Ursulin * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
226357772953STvrtko Ursulin * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
226457772953STvrtko Ursulin * .engine_index = 0, // Place this virtual engine into engine map slot 0
226557772953STvrtko Ursulin * .num_siblings = 2,
226657772953STvrtko Ursulin * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
226757772953STvrtko Ursulin * { I915_ENGINE_CLASS_VIDEO, 1 }, },
226857772953STvrtko Ursulin * };
226957772953STvrtko Ursulin * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
227057772953STvrtko Ursulin * .engines = { { I915_ENGINE_CLASS_INVALID,
227157772953STvrtko Ursulin * I915_ENGINE_CLASS_INVALID_NONE } },
227257772953STvrtko Ursulin * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
227357772953STvrtko Ursulin * };
227457772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext_setparam p_engines = {
227557772953STvrtko Ursulin * .base = {
227657772953STvrtko Ursulin * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
227757772953STvrtko Ursulin * },
227857772953STvrtko Ursulin * .param = {
227957772953STvrtko Ursulin * .param = I915_CONTEXT_PARAM_ENGINES,
228057772953STvrtko Ursulin * .value = to_user_pointer(&engines),
228157772953STvrtko Ursulin * .size = sizeof(engines),
228257772953STvrtko Ursulin * },
228357772953STvrtko Ursulin * };
228457772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext create = {
228557772953STvrtko Ursulin * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
228657772953STvrtko Ursulin * .extensions = to_user_pointer(&p_engines),
228757772953STvrtko Ursulin * };
228857772953STvrtko Ursulin *
228957772953STvrtko Ursulin * ctx_id = gem_context_create_ext(drm_fd, &create);
229057772953STvrtko Ursulin *
229157772953STvrtko Ursulin * // Now we have created a GEM context with its engine map containing a
229257772953STvrtko Ursulin * // single virtual engine. Submissions to this slot can go either to
229357772953STvrtko Ursulin * // vcs0 or vcs1, depending on the load balancing algorithm used inside
229457772953STvrtko Ursulin * // the driver. The load balancing is dynamic from one batch buffer to
229557772953STvrtko Ursulin * // another and transparent to userspace.
229657772953STvrtko Ursulin *
229757772953STvrtko Ursulin * ...
229857772953STvrtko Ursulin * execbuf.rsvd1 = ctx_id;
229957772953STvrtko Ursulin * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
230057772953STvrtko Ursulin * gem_execbuf(drm_fd, &execbuf);
230157772953STvrtko Ursulin */
230257772953STvrtko Ursulin 
23036d06779eSChris Wilson /*
23046d06779eSChris Wilson * i915_context_engines_load_balance:
23056d06779eSChris Wilson *
23066d06779eSChris Wilson * Enable load balancing across this set of engines.
23076d06779eSChris Wilson *
23086d06779eSChris Wilson * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
23096d06779eSChris Wilson * used will proxy the execbuffer request onto one of the set of engines
23106d06779eSChris Wilson * in such a way as to distribute the load evenly across the set.
23116d06779eSChris Wilson *
23126d06779eSChris Wilson * The set of engines must be compatible (e.g. the same HW class) as they
23136d06779eSChris Wilson * will share the same logical GPU context and ring.
23146d06779eSChris Wilson *
23156d06779eSChris Wilson * To intermix rendering with the virtual engine and direct rendering onto
23166d06779eSChris Wilson * the backing engines (bypassing the load balancing proxy), the context must
23176d06779eSChris Wilson * be defined to use a single timeline for all engines.
23186d06779eSChris Wilson */
23196d06779eSChris Wilson struct i915_context_engines_load_balance {
23206d06779eSChris Wilson struct i915_user_extension base;
23216d06779eSChris Wilson 
23226d06779eSChris Wilson __u16 engine_index;
23236d06779eSChris Wilson __u16 num_siblings;
23246d06779eSChris Wilson __u32 flags; /* all undefined flags must be zero */
23256d06779eSChris Wilson 
23266d06779eSChris Wilson __u64 mbz64; /* reserved for future use; must be zero */
23276d06779eSChris Wilson 
232894dfc73eSGustavo A. R. Silva struct i915_engine_class_instance engines[];
23296d06779eSChris Wilson } __attribute__((packed));
23306d06779eSChris Wilson 
23316d06779eSChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
23326d06779eSChris Wilson struct i915_user_extension base; \
23336d06779eSChris Wilson __u16 engine_index; \
23346d06779eSChris Wilson __u16 num_siblings; \
23356d06779eSChris Wilson __u32 flags; \
23366d06779eSChris Wilson __u64 mbz64; \
23376d06779eSChris Wilson struct i915_engine_class_instance engines[N__]; \
23386d06779eSChris Wilson } __attribute__((packed)) name__
23396d06779eSChris Wilson 
2340ee113690SChris Wilson /*
2341ee113690SChris Wilson * i915_context_engines_bond:
2342ee113690SChris Wilson *
2343ee113690SChris Wilson * Construct bonded pairs for execution within a virtual engine.
2344ee113690SChris Wilson *
2345ee113690SChris Wilson * All engines are equal, but some are more equal than others. Given
2346ee113690SChris Wilson * the distribution of resources in the HW, it may be preferable to run
2347ee113690SChris Wilson * a request on a given subset of engines in parallel to a request on a
2348ee113690SChris Wilson * specific engine. We enable this selection of engines within a virtual
2349ee113690SChris Wilson * engine by specifying bonding pairs; for any given master engine we will
2350ee113690SChris Wilson * only execute on one of the corresponding siblings within the virtual engine.
2351ee113690SChris Wilson *
2352ee113690SChris Wilson * Executing a request in parallel on the master engine and a sibling requires
2353ee113690SChris Wilson * coordination with an I915_EXEC_FENCE_SUBMIT.
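 *
 * As an illustration, a minimal sketch of defining a single bond (this
 * assumes a virtual engine has already been placed in slot 0 of the
 * context's engine map, as in the load balancing example above; the
 * class/instance values are illustrative only):
 *
 * .. code-block:: C
 *
 *     I915_DEFINE_CONTEXT_ENGINES_BOND(bond, 1) = {
 *             .base.name = I915_CONTEXT_ENGINES_EXT_BOND,
 *             .master = { I915_ENGINE_CLASS_RENDER, 0 },
 *             .virtual_index = 0, // the virtual engine in ctx->engines[]
 *             .num_bonds = 1,
 *             .engines = { { I915_ENGINE_CLASS_VIDEO, 0 } },
 *     };
 *
 * The extension would then be linked into the I915_CONTEXT_PARAM_ENGINES
 * extension chain via its base.next_extension field, alongside the load
 * balancing extension it refines.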
2354ee113690SChris Wilson */
2355ee113690SChris Wilson struct i915_context_engines_bond {
2356ee113690SChris Wilson struct i915_user_extension base;
2357ee113690SChris Wilson 
2358ee113690SChris Wilson struct i915_engine_class_instance master;
2359ee113690SChris Wilson 
2360ee113690SChris Wilson __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
2361ee113690SChris Wilson __u16 num_bonds;
2362ee113690SChris Wilson 
2363ee113690SChris Wilson __u64 flags; /* all undefined flags must be zero */
2364ee113690SChris Wilson __u64 mbz64[4]; /* reserved for future use; must be zero */
2365ee113690SChris Wilson 
236694dfc73eSGustavo A. R. Silva struct i915_engine_class_instance engines[];
2367ee113690SChris Wilson } __attribute__((packed));
2368ee113690SChris Wilson 
2369ee113690SChris Wilson #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
2370ee113690SChris Wilson struct i915_user_extension base; \
2371ee113690SChris Wilson struct i915_engine_class_instance master; \
2372ee113690SChris Wilson __u16 virtual_index; \
2373ee113690SChris Wilson __u16 num_bonds; \
2374ee113690SChris Wilson __u64 flags; \
2375ee113690SChris Wilson __u64 mbz64[4]; \
2376ee113690SChris Wilson struct i915_engine_class_instance engines[N__]; \
2377ee113690SChris Wilson } __attribute__((packed)) name__
2378ee113690SChris Wilson 
237957772953STvrtko Ursulin /**
2380e5e32171SMatthew Brost * struct i915_context_engines_parallel_submit - Configure engine for
2381e5e32171SMatthew Brost * parallel submission.
2382e5e32171SMatthew Brost *
2383e5e32171SMatthew Brost * Set up a slot in the context engine map to allow multiple BBs to be submitted
2384e5e32171SMatthew Brost * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
2385e5e32171SMatthew Brost * in parallel. Multiple hardware contexts are created internally in the i915 to
2386e5e32171SMatthew Brost * run these BBs. Once a slot is configured for N BBs, only N BBs can be
2387e5e32171SMatthew Brost * submitted in each execbuf IOCTL, and this is implicit behavior, e.g. the user
2388e5e32171SMatthew Brost * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
2389e5e32171SMatthew Brost * many BBs there are based on the slot's configuration. The N BBs are the last
2390e5e32171SMatthew Brost * N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
2391e5e32171SMatthew Brost *
2392e5e32171SMatthew Brost * The default placement behavior is to create implicit bonds between each
2393e5e32171SMatthew Brost * context if each context maps to more than 1 physical engine (e.g. context is
2394e5e32171SMatthew Brost * a virtual engine). Also, we only allow contexts of the same engine class, and
2395e5e32171SMatthew Brost * these contexts must be in logically contiguous order. Examples of the placement
2396e5e32171SMatthew Brost * behavior are described below. Lastly, the default is to not allow BBs to be
2397e5e32171SMatthew Brost * preempted mid-batch. Rather, coordinated preemption points are inserted on all
2398e5e32171SMatthew Brost * hardware contexts between each set of BBs. Flags could be added in the future
2399e5e32171SMatthew Brost * to change both of these default behaviors.
2400e5e32171SMatthew Brost *
2401e5e32171SMatthew Brost * Returns -EINVAL if hardware context placement configuration is invalid or if
2402e5e32171SMatthew Brost * the placement configuration isn't supported on the platform / submission
2403e5e32171SMatthew Brost * interface.
2404e5e32171SMatthew Brost * Returns -ENODEV if the extension isn't supported on the platform / submission
2405e5e32171SMatthew Brost * interface.
2406e5e32171SMatthew Brost *
2407e5e32171SMatthew Brost * .. code-block:: none
2408e5e32171SMatthew Brost *
2409e5e32171SMatthew Brost * Example syntax:
2410e5e32171SMatthew Brost * CS[X] = generic engine of same class, logical instance X
2411e5e32171SMatthew Brost * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
2412e5e32171SMatthew Brost *
2413e5e32171SMatthew Brost * Example 1 pseudo code:
2414e5e32171SMatthew Brost * set_engines(INVALID)
2415e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=1,
2416e5e32171SMatthew Brost * engines=CS[0],CS[1])
2417e5e32171SMatthew Brost *
2418e5e32171SMatthew Brost * Results in the following valid placement:
2419e5e32171SMatthew Brost * CS[0], CS[1]
2420e5e32171SMatthew Brost *
2421e5e32171SMatthew Brost * Example 2 pseudo code:
2422e5e32171SMatthew Brost * set_engines(INVALID)
2423e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=2,
2424e5e32171SMatthew Brost * engines=CS[0],CS[2],CS[1],CS[3])
2425e5e32171SMatthew Brost *
2426e5e32171SMatthew Brost * Results in the following valid placements:
2427e5e32171SMatthew Brost * CS[0], CS[1]
2428e5e32171SMatthew Brost * CS[2], CS[3]
2429e5e32171SMatthew Brost *
2430e5e32171SMatthew Brost * This can be thought of as two virtual engines, each containing two
2431e5e32171SMatthew Brost * engines thereby making a 2D array. However, there are bonds tying the
2432e5e32171SMatthew Brost * entries together and placing restrictions on how they can be scheduled.
2433e5e32171SMatthew Brost * Specifically, the scheduler can choose only vertical columns from the 2D
2434e5e32171SMatthew Brost * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
2435e5e32171SMatthew Brost * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
2436e5e32171SMatthew Brost * versa. Likewise, choosing CS[2] requires also using CS[3].
2437e5e32171SMatthew Brost * VE[0] = CS[0], CS[2]
2438e5e32171SMatthew Brost * VE[1] = CS[1], CS[3]
2439e5e32171SMatthew Brost *
2440e5e32171SMatthew Brost * Example 3 pseudo code:
2441e5e32171SMatthew Brost * set_engines(INVALID)
2442e5e32171SMatthew Brost * set_parallel(engine_index=0, width=2, num_siblings=2,
2443e5e32171SMatthew Brost * engines=CS[0],CS[1],CS[1],CS[3])
2444e5e32171SMatthew Brost *
2445e5e32171SMatthew Brost * Results in the following valid and invalid placements:
2446e5e32171SMatthew Brost * CS[0], CS[1]
2447e5e32171SMatthew Brost * CS[1], CS[3] - Not logically contiguous, return -EINVAL
2448e5e32171SMatthew Brost */
2449e5e32171SMatthew Brost struct i915_context_engines_parallel_submit {
2450e5e32171SMatthew Brost /**
2451e5e32171SMatthew Brost * @base: base user extension.
2452e5e32171SMatthew Brost */
2453e5e32171SMatthew Brost struct i915_user_extension base;
2454e5e32171SMatthew Brost 
2455e5e32171SMatthew Brost /**
2456e5e32171SMatthew Brost * @engine_index: slot for parallel engine
2457e5e32171SMatthew Brost */
2458e5e32171SMatthew Brost __u16 engine_index;
2459e5e32171SMatthew Brost 
2460e5e32171SMatthew Brost /**
2461e5e32171SMatthew Brost * @width: number of contexts per parallel engine, or in other words,
2462e5e32171SMatthew Brost * the number of batches in each submission
2463e5e32171SMatthew Brost */
2464e5e32171SMatthew Brost __u16 width;
2465e5e32171SMatthew Brost 
2466e5e32171SMatthew Brost /**
2467e5e32171SMatthew Brost * @num_siblings: number of siblings per context, or in other words,
2468e5e32171SMatthew Brost * the number of possible placements for each submission
2469e5e32171SMatthew Brost */
2470e5e32171SMatthew Brost __u16 num_siblings;
2471e5e32171SMatthew Brost 
2472e5e32171SMatthew Brost /**
2473e5e32171SMatthew Brost * @mbz16: reserved for future use; must be zero
2474e5e32171SMatthew Brost */
2475e5e32171SMatthew Brost __u16 mbz16;
2476e5e32171SMatthew Brost 
2477e5e32171SMatthew Brost /**
2478e5e32171SMatthew Brost * @flags: all undefined flags must be zero; no flags are currently defined
2479e5e32171SMatthew Brost */
2480e5e32171SMatthew Brost __u64 flags;
2481e5e32171SMatthew Brost 
2482e5e32171SMatthew Brost /**
2483e5e32171SMatthew Brost * @mbz64: reserved for future use; must be zero
2484e5e32171SMatthew Brost */
2485e5e32171SMatthew Brost __u64 mbz64[3];
2486e5e32171SMatthew Brost 
2487e5e32171SMatthew Brost /**
2488e5e32171SMatthew Brost * @engines: 2-d array of engine instances to configure the parallel engine
2489e5e32171SMatthew Brost *
2490e5e32171SMatthew Brost * length = width (i) * num_siblings (j)
2491e5e32171SMatthew Brost * index = j + i * num_siblings
2492e5e32171SMatthew Brost */
249394dfc73eSGustavo A. R. Silva struct i915_engine_class_instance engines[];
2494e5e32171SMatthew Brost 
2495e5e32171SMatthew Brost } __packed;
2496e5e32171SMatthew Brost 
2497e5e32171SMatthew Brost #define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
2498e5e32171SMatthew Brost struct i915_user_extension base; \
2499e5e32171SMatthew Brost __u16 engine_index; \
2500e5e32171SMatthew Brost __u16 width; \
2501e5e32171SMatthew Brost __u16 num_siblings; \
2502e5e32171SMatthew Brost __u16 mbz16; \
2503e5e32171SMatthew Brost __u64 flags; \
2504e5e32171SMatthew Brost __u64 mbz64[3]; \
2505e5e32171SMatthew Brost struct i915_engine_class_instance engines[N__]; \
2506e5e32171SMatthew Brost } __attribute__((packed)) name__
2507e5e32171SMatthew Brost 
2508e5e32171SMatthew Brost /**
250957772953STvrtko Ursulin * DOC: Context Engine Map uAPI
251057772953STvrtko Ursulin *
251157772953STvrtko Ursulin * Context engine map is a new way of addressing engines when submitting batch-
251257772953STvrtko Ursulin * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
251357772953STvrtko Ursulin * inside the flags field of `struct drm_i915_gem_execbuffer2`.
251457772953STvrtko Ursulin *
251557772953STvrtko Ursulin * To use it, created GEM contexts need to be configured with a list of engines
251657772953STvrtko Ursulin * the user intends to submit to. This is accomplished using the
251757772953STvrtko Ursulin * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
251857772953STvrtko Ursulin * i915_context_param_engines`.
251957772953STvrtko Ursulin *
252057772953STvrtko Ursulin * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
252157772953STvrtko Ursulin * configured map.
252257772953STvrtko Ursulin *
252357772953STvrtko Ursulin * Example of creating such a context and submitting against it:
252457772953STvrtko Ursulin *
252557772953STvrtko Ursulin * .. code-block:: C
252657772953STvrtko Ursulin *
252757772953STvrtko Ursulin * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
252857772953STvrtko Ursulin * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
252957772953STvrtko Ursulin * { I915_ENGINE_CLASS_COPY, 0 } }
253057772953STvrtko Ursulin * };
253157772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext_setparam p_engines = {
253257772953STvrtko Ursulin * .base = {
253357772953STvrtko Ursulin * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
253457772953STvrtko Ursulin * },
253557772953STvrtko Ursulin * .param = {
253657772953STvrtko Ursulin * .param = I915_CONTEXT_PARAM_ENGINES,
253757772953STvrtko Ursulin * .value = to_user_pointer(&engines),
253857772953STvrtko Ursulin * .size = sizeof(engines),
253957772953STvrtko Ursulin * },
254057772953STvrtko Ursulin * };
254157772953STvrtko Ursulin * struct drm_i915_gem_context_create_ext create = {
254257772953STvrtko Ursulin * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
254357772953STvrtko Ursulin * .extensions = to_user_pointer(&p_engines),
254457772953STvrtko Ursulin * };
254557772953STvrtko Ursulin *
254657772953STvrtko Ursulin * ctx_id = gem_context_create_ext(drm_fd, &create);
254757772953STvrtko Ursulin *
254857772953STvrtko Ursulin * // We have now created a GEM context with two engines in the map:
254957772953STvrtko Ursulin * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
255057772953STvrtko Ursulin * // will not be accessible from this context.
255157772953STvrtko Ursulin *
255257772953STvrtko Ursulin * ...
255357772953STvrtko Ursulin * execbuf.rsvd1 = ctx_id;
255457772953STvrtko Ursulin * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
255557772953STvrtko Ursulin * gem_execbuf(drm_fd, &execbuf);
255657772953STvrtko Ursulin *
255757772953STvrtko Ursulin * ...
255857772953STvrtko Ursulin * execbuf.rsvd1 = ctx_id;
255957772953STvrtko Ursulin * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
256057772953STvrtko Ursulin * gem_execbuf(drm_fd, &execbuf);
256157772953STvrtko Ursulin */
256257772953STvrtko Ursulin 
2563976b55f0SChris Wilson struct i915_context_param_engines {
2564976b55f0SChris Wilson __u64 extensions; /* linked chain of extension blocks, 0 terminates */
25656d06779eSChris Wilson #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
2566ee113690SChris Wilson #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
2567e5e32171SMatthew Brost #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
256802abecdeSGustavo A. R. Silva struct i915_engine_class_instance engines[];
2569976b55f0SChris Wilson } __attribute__((packed));
2570976b55f0SChris Wilson 
2571976b55f0SChris Wilson #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
2572976b55f0SChris Wilson __u64 extensions; \
2573976b55f0SChris Wilson struct i915_engine_class_instance engines[N__]; \
2574976b55f0SChris Wilson } __attribute__((packed)) name__
2575976b55f0SChris Wilson 
2576*0f1bb41bSTvrtko Ursulin struct i915_gem_context_param_context_image {
2577*0f1bb41bSTvrtko Ursulin /** @engine: Engine class & instance to be configured. */
2578*0f1bb41bSTvrtko Ursulin struct i915_engine_class_instance engine;
2579*0f1bb41bSTvrtko Ursulin 
2580*0f1bb41bSTvrtko Ursulin /** @flags: One of the supported flags or zero. */
2581*0f1bb41bSTvrtko Ursulin __u32 flags;
2582*0f1bb41bSTvrtko Ursulin #define I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX (1u << 0)
2583*0f1bb41bSTvrtko Ursulin 
2584*0f1bb41bSTvrtko Ursulin /** @size: Size of the image blob pointed to by @image. */
2585*0f1bb41bSTvrtko Ursulin __u32 size;
2586*0f1bb41bSTvrtko Ursulin 
2587*0f1bb41bSTvrtko Ursulin /** @mbz: Must be zero. */
2588*0f1bb41bSTvrtko Ursulin __u32 mbz;
2589*0f1bb41bSTvrtko Ursulin 
2590*0f1bb41bSTvrtko Ursulin /** @image: Userspace memory containing the context image. */
2591*0f1bb41bSTvrtko Ursulin __u64 image;
2592*0f1bb41bSTvrtko Ursulin } __attribute__((packed));
2593*0f1bb41bSTvrtko Ursulin 
2594a913bde8SNiranjana Vishwanathapura /**
2595a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_context_create_ext_setparam - Context parameter
2596a913bde8SNiranjana Vishwanathapura * to set or query during context creation.
2597a913bde8SNiranjana Vishwanathapura */
2598b9171541SChris Wilson struct drm_i915_gem_context_create_ext_setparam {
2599a913bde8SNiranjana Vishwanathapura /** @base: Extension link. See struct i915_user_extension. */
2600b9171541SChris Wilson struct i915_user_extension base;
2601a913bde8SNiranjana Vishwanathapura 
2602a913bde8SNiranjana Vishwanathapura /**
2603a913bde8SNiranjana Vishwanathapura * @param: Context parameter to set or query.
2604a913bde8SNiranjana Vishwanathapura * See struct drm_i915_gem_context_param.
2605a913bde8SNiranjana Vishwanathapura */
2606b9171541SChris Wilson struct drm_i915_gem_context_param param;
2607b9171541SChris Wilson };
2608b9171541SChris Wilson 
2609b9171541SChris Wilson struct drm_i915_gem_context_destroy {
2610b9171541SChris Wilson __u32 ctx_id;
2611b9171541SChris Wilson __u32 pad;
2612b9171541SChris Wilson };
2613b9171541SChris Wilson 
2614a913bde8SNiranjana Vishwanathapura /**
2615a913bde8SNiranjana Vishwanathapura * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
2616a913bde8SNiranjana Vishwanathapura *
2617b9171541SChris Wilson * DRM_I915_GEM_VM_CREATE -
2618b9171541SChris Wilson *
2619b9171541SChris Wilson * Create a new virtual memory address space (ppGTT) for use within a context
2620b9171541SChris Wilson * on the same file. Extensions can be provided to configure exactly how the
2621b9171541SChris Wilson * address space is set up upon creation.
2622b9171541SChris Wilson *
2623b9171541SChris Wilson * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2624b9171541SChris Wilson * returned in the outparam @vm_id.
2625b9171541SChris Wilson *
2626b9171541SChris Wilson * An extension chain may be provided, starting with @extensions, and terminated
2627b9171541SChris Wilson * by the @next_extension being 0. Currently, no extensions are defined.
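 *
 * A minimal creation sketch (fd is assumed to be an open DRM file
 * descriptor and ctx_id an existing context; attaching the new VM via
 * I915_CONTEXT_PARAM_VM is shown as a typical follow-up, not a
 * requirement; error handling is omitted):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_vm_control ctl = {};
 *
 *     ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
 *     // ctl.vm_id now identifies the new address space.
 *
 *     struct drm_i915_gem_context_param arg = {
 *             .ctx_id = ctx_id,
 *             .param = I915_CONTEXT_PARAM_VM,
 *             .value = ctl.vm_id,
 *     };
 *     ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);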
2628b9171541SChris Wilson *
2629b9171541SChris Wilson * DRM_I915_GEM_VM_DESTROY -
2630b9171541SChris Wilson *
2631a913bde8SNiranjana Vishwanathapura * Destroys a previously created VM id, specified in @vm_id.
2632b9171541SChris Wilson *
2633b9171541SChris Wilson * No extensions or flags are allowed currently, and so must be zero.
2634b9171541SChris Wilson */
2635b9171541SChris Wilson struct drm_i915_gem_vm_control {
2636a913bde8SNiranjana Vishwanathapura /** @extensions: Zero-terminated chain of extensions. */
2637b9171541SChris Wilson __u64 extensions;
2638a913bde8SNiranjana Vishwanathapura 
2639a913bde8SNiranjana Vishwanathapura /** @flags: reserved for future usage, currently MBZ */
2640b9171541SChris Wilson __u32 flags;
2641a913bde8SNiranjana Vishwanathapura 
2642a913bde8SNiranjana Vishwanathapura /** @vm_id: Id of the VM created or to be destroyed */
2643b9171541SChris Wilson __u32 vm_id;
2644b9171541SChris Wilson };
2645b9171541SChris Wilson 
2646b9171541SChris Wilson struct drm_i915_reg_read {
2647b9171541SChris Wilson /*
2648b9171541SChris Wilson * Register offset.
2649b9171541SChris Wilson * For 64bit wide registers where the upper 32bits don't immediately
2650b9171541SChris Wilson * follow the lower 32bits, the offset of the lower 32bits must
2651b9171541SChris Wilson * be specified
2652b9171541SChris Wilson */
2653b9171541SChris Wilson __u64 offset;
2654b9171541SChris Wilson #define I915_REG_READ_8B_WA (1ul << 0)
2655b9171541SChris Wilson 
2656b9171541SChris Wilson __u64 val; /* Return value */
2657b9171541SChris Wilson };
2658b9171541SChris Wilson 
2659b9171541SChris Wilson /* Known registers:
2660b9171541SChris Wilson *
2661b9171541SChris Wilson * Render engine timestamp - 0x2358 + 64bit - gen7+
2662b9171541SChris Wilson * - Note this register returns an invalid value if read using the default
2663b9171541SChris Wilson * single-instruction 8-byte read; to work around that, pass the flag
2664b9171541SChris Wilson * I915_REG_READ_8B_WA in the offset field.
2665b9171541SChris Wilson *
2666b9171541SChris Wilson */
2667b9171541SChris Wilson 
2668d10612f8SNirmoy Das /*
2669d10612f8SNirmoy Das * struct drm_i915_reset_stats - Return global reset and other context stats
2670d10612f8SNirmoy Das *
2671d10612f8SNirmoy Das * The driver keeps a few stats for each context and also a global reset count.
2672d10612f8SNirmoy Das * This struct can be used to query those stats.
2673d10612f8SNirmoy Das */
2674b9171541SChris Wilson struct drm_i915_reset_stats {
2675d10612f8SNirmoy Das /** @ctx_id: ID of the requested context */
2676b9171541SChris Wilson __u32 ctx_id;
2677d10612f8SNirmoy Das 
2678d10612f8SNirmoy Das /** @flags: MBZ */
2679b9171541SChris Wilson __u32 flags;
2680b9171541SChris Wilson 
2681d10612f8SNirmoy Das /** @reset_count: All resets since boot/module reload, for all contexts */
2682b9171541SChris Wilson __u32 reset_count;
2683b9171541SChris Wilson 
2684d10612f8SNirmoy Das /** @batch_active: Number of batches lost when active in GPU, for this context */
2685b9171541SChris Wilson __u32 batch_active;
2686b9171541SChris Wilson 
2687d10612f8SNirmoy Das /** @batch_pending: Number of batches lost pending for execution, for this context */
2688b9171541SChris Wilson __u32 batch_pending;
2689b9171541SChris Wilson 
2690d10612f8SNirmoy Das /** @pad: MBZ */
2691b9171541SChris Wilson __u32 pad;
2692b9171541SChris Wilson };
2693b9171541SChris Wilson 
2694aef7b67aSMatthew Auld /**
2695aef7b67aSMatthew Auld * struct drm_i915_gem_userptr - Create GEM object from user-allocated memory.
2696aef7b67aSMatthew Auld *
2697aef7b67aSMatthew Auld * Userptr objects have several restrictions on what ioctls can be used with the
2698aef7b67aSMatthew Auld * object handle.
2699aef7b67aSMatthew Auld */
2700b9171541SChris Wilson struct drm_i915_gem_userptr {
2701aef7b67aSMatthew Auld /**
2702aef7b67aSMatthew Auld * @user_ptr: The pointer to the allocated memory.
2703aef7b67aSMatthew Auld *
2704aef7b67aSMatthew Auld * Needs to be aligned to PAGE_SIZE.
2705aef7b67aSMatthew Auld */
2706b9171541SChris Wilson __u64 user_ptr;
2707aef7b67aSMatthew Auld 
2708aef7b67aSMatthew Auld /**
2709aef7b67aSMatthew Auld * @user_size:
2710aef7b67aSMatthew Auld *
2711aef7b67aSMatthew Auld * The size in bytes for the allocated memory. This will also become the
2712aef7b67aSMatthew Auld * object size.
2713aef7b67aSMatthew Auld *
2714aef7b67aSMatthew Auld * Needs to be aligned to PAGE_SIZE, and should be at least
2715aef7b67aSMatthew Auld * PAGE_SIZE.
2716aef7b67aSMatthew Auld */
2717b9171541SChris Wilson __u64 user_size;
2718aef7b67aSMatthew Auld 
2719aef7b67aSMatthew Auld /**
2720aef7b67aSMatthew Auld * @flags:
2721aef7b67aSMatthew Auld *
2722aef7b67aSMatthew Auld * Supported flags:
2723aef7b67aSMatthew Auld *
2724aef7b67aSMatthew Auld * I915_USERPTR_READ_ONLY:
2725aef7b67aSMatthew Auld *
2726aef7b67aSMatthew Auld * Mark the object as readonly; this also means GPU access can only be
2727aef7b67aSMatthew Auld * readonly. This is only supported on HW which supports readonly access
2728aef7b67aSMatthew Auld * through the GTT. If the HW can't support readonly access, an error is
2729aef7b67aSMatthew Auld * returned.
2730aef7b67aSMatthew Auld *
2731b65a9489SChris Wilson * I915_USERPTR_PROBE:
2732b65a9489SChris Wilson *
2733b65a9489SChris Wilson * Probe the provided @user_ptr range and validate that the @user_ptr is
2734b65a9489SChris Wilson * indeed pointing to normal memory and that the range is also valid.
2735b65a9489SChris Wilson * For example, if some garbage address is given to the kernel, the
2736b65a9489SChris Wilson * probe should fail.
2737b65a9489SChris Wilson *
2738b65a9489SChris Wilson * Returns -EFAULT if the probe failed.
2739b65a9489SChris Wilson *
2740b65a9489SChris Wilson * Note that this doesn't populate the backing pages, and also doesn't
2741b65a9489SChris Wilson * guarantee that the object will remain valid when the object is
2742b65a9489SChris Wilson * eventually used.
2743b65a9489SChris Wilson *
2744b65a9489SChris Wilson * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
2745b65a9489SChris Wilson * returns a non-zero value.
2746b65a9489SChris Wilson *
2747aef7b67aSMatthew Auld * I915_USERPTR_UNSYNCHRONIZED:
2748aef7b67aSMatthew Auld *
2749aef7b67aSMatthew Auld * NOT USED. Setting this flag will result in an error.
2750aef7b67aSMatthew Auld */
2751b9171541SChris Wilson __u32 flags;
2752b9171541SChris Wilson #define I915_USERPTR_READ_ONLY 0x1
2753b65a9489SChris Wilson #define I915_USERPTR_PROBE 0x2
2754b9171541SChris Wilson #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2755b9171541SChris Wilson /**
2756aef7b67aSMatthew Auld * @handle: Returned handle for the object.
2757b9171541SChris Wilson *
2758b9171541SChris Wilson * Object handles are nonzero.
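 *
 * As an illustration, a minimal creation sketch (ptr and size are assumed
 * to be PAGE_SIZE-aligned userspace values and fd an open DRM file
 * descriptor; error handling is omitted):
 *
 * .. code-block:: C
 *
 *     struct drm_i915_gem_userptr userptr = {
 *             .user_ptr = (uintptr_t)ptr,
 *             .user_size = size,
 *             .flags = 0,
 *     };
 *
 *     ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
 *     // On success, userptr.handle names the new GEM object.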
2759b9171541SChris Wilson */ 2760b9171541SChris Wilson __u32 handle; 2761b9171541SChris Wilson }; 2762b9171541SChris Wilson 2763d7965152SRobert Bragg enum drm_i915_oa_format { 276419f81df2SRobert Bragg I915_OA_FORMAT_A13 = 1, /* HSW only */ 276519f81df2SRobert Bragg I915_OA_FORMAT_A29, /* HSW only */ 276619f81df2SRobert Bragg I915_OA_FORMAT_A13_B8_C8, /* HSW only */ 276719f81df2SRobert Bragg I915_OA_FORMAT_B4_C8, /* HSW only */ 276819f81df2SRobert Bragg I915_OA_FORMAT_A45_B8_C8, /* HSW only */ 276919f81df2SRobert Bragg I915_OA_FORMAT_B4_C8_A16, /* HSW only */ 277019f81df2SRobert Bragg I915_OA_FORMAT_C4_B8, /* HSW+ */ 277119f81df2SRobert Bragg 277219f81df2SRobert Bragg /* Gen8+ */ 277319f81df2SRobert Bragg I915_OA_FORMAT_A12, 277419f81df2SRobert Bragg I915_OA_FORMAT_A12_B8_C8, 277519f81df2SRobert Bragg I915_OA_FORMAT_A32u40_A4u32_B8_C8, 2776d7965152SRobert Bragg 277781d5f7d9SUmesh Nerlige Ramappa /* DG2 */ 277881d5f7d9SUmesh Nerlige Ramappa I915_OAR_FORMAT_A32u40_A4u32_B8_C8, 277981d5f7d9SUmesh Nerlige Ramappa I915_OA_FORMAT_A24u40_A14u32_B8_C8, 278081d5f7d9SUmesh Nerlige Ramappa 27811cc064dcSUmesh Nerlige Ramappa /* MTL OAM */ 27821cc064dcSUmesh Nerlige Ramappa I915_OAM_FORMAT_MPEC8u64_B8_C8, 27831cc064dcSUmesh Nerlige Ramappa I915_OAM_FORMAT_MPEC8u32_B8_C8, 27841cc064dcSUmesh Nerlige Ramappa 2785d7965152SRobert Bragg I915_OA_FORMAT_MAX /* non-ABI */ 2786d7965152SRobert Bragg }; 2787d7965152SRobert Bragg 2788eec688e1SRobert Bragg enum drm_i915_perf_property_id { 2789eec688e1SRobert Bragg /** 2790eec688e1SRobert Bragg * Open the stream for a specific context handle (as used with 2791eec688e1SRobert Bragg * execbuffer2). A stream opened for a specific context this way 2792eec688e1SRobert Bragg * won't typically require root privileges. 2793b8d49f28SLionel Landwerlin * 2794b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2795eec688e1SRobert Bragg */ 2796eec688e1SRobert Bragg DRM_I915_PERF_PROP_CTX_HANDLE = 1, 2797eec688e1SRobert Bragg 2798d7965152SRobert Bragg /** 2799d7965152SRobert Bragg * A value of 1 requests the inclusion of raw OA unit reports as 2800d7965152SRobert Bragg * part of stream samples. 2801b8d49f28SLionel Landwerlin * 2802b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2803d7965152SRobert Bragg */ 2804d7965152SRobert Bragg DRM_I915_PERF_PROP_SAMPLE_OA, 2805d7965152SRobert Bragg 2806d7965152SRobert Bragg /** 2807d7965152SRobert Bragg * The value specifies which set of OA unit metrics should be 280866137f54SRandy Dunlap * configured, defining the contents of any OA unit reports. 2809b8d49f28SLionel Landwerlin * 2810b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 2811d7965152SRobert Bragg */ 2812d7965152SRobert Bragg DRM_I915_PERF_PROP_OA_METRICS_SET, 2813d7965152SRobert Bragg 2814d7965152SRobert Bragg /** 2815d7965152SRobert Bragg * The value specifies the size and layout of OA unit reports. 2816b8d49f28SLionel Landwerlin * 2817b8d49f28SLionel Landwerlin * This property is available in perf revision 1. 
2818d7965152SRobert Bragg */
2819d7965152SRobert Bragg DRM_I915_PERF_PROP_OA_FORMAT,
2820d7965152SRobert Bragg 
2821d7965152SRobert Bragg /**
2822d7965152SRobert Bragg * Specifying this property implicitly requests periodic OA unit
2823d7965152SRobert Bragg * sampling, and (at least on Haswell) the sampling frequency is derived
2824d7965152SRobert Bragg * from this exponent as follows:
2825d7965152SRobert Bragg *
2826d7965152SRobert Bragg * 80ns * 2^(period_exponent + 1)
2827b8d49f28SLionel Landwerlin *
2828b8d49f28SLionel Landwerlin * This property is available in perf revision 1.
2829d7965152SRobert Bragg */
2830d7965152SRobert Bragg DRM_I915_PERF_PROP_OA_EXPONENT,
2831d7965152SRobert Bragg 
28329cd20ef7SLionel Landwerlin /**
28339cd20ef7SLionel Landwerlin * Specifying this property is only valid when specifying a context to
28349cd20ef7SLionel Landwerlin * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
28359cd20ef7SLionel Landwerlin * will hold preemption of the particular context we want to gather
28369cd20ef7SLionel Landwerlin * performance data about. The execbuf2 submissions must include a
28379cd20ef7SLionel Landwerlin * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
28389cd20ef7SLionel Landwerlin *
28399cd20ef7SLionel Landwerlin * This property is available in perf revision 3.
28409cd20ef7SLionel Landwerlin */
28419cd20ef7SLionel Landwerlin DRM_I915_PERF_PROP_HOLD_PREEMPTION,
28429cd20ef7SLionel Landwerlin 
284311ecbdddSLionel Landwerlin /**
284411ecbdddSLionel Landwerlin * Specifying this pins all contexts to the specified SSEU power
284511ecbdddSLionel Landwerlin * configuration for the duration of the recording.
284611ecbdddSLionel Landwerlin *
284711ecbdddSLionel Landwerlin * This parameter's value is a pointer to a struct
284811ecbdddSLionel Landwerlin * drm_i915_gem_context_param_sseu.
284911ecbdddSLionel Landwerlin *
285011ecbdddSLionel Landwerlin * This property is available in perf revision 4.
285111ecbdddSLionel Landwerlin */
285211ecbdddSLionel Landwerlin DRM_I915_PERF_PROP_GLOBAL_SSEU,
285311ecbdddSLionel Landwerlin 
28544ef10fe0SLionel Landwerlin /**
28554ef10fe0SLionel Landwerlin * This optional parameter specifies the timer interval in nanoseconds
28564ef10fe0SLionel Landwerlin * at which the i915 driver will check the OA buffer for available data.
28574ef10fe0SLionel Landwerlin * The minimum allowed value is 100 microseconds. A default value is used
28584ef10fe0SLionel Landwerlin * by the driver if this parameter is not specified. Note that larger timer
28594ef10fe0SLionel Landwerlin * values will reduce CPU consumption during OA perf captures. However,
28604ef10fe0SLionel Landwerlin * excessively large values would potentially result in OA buffer
28614ef10fe0SLionel Landwerlin * overwrites as captures reach the end of the OA buffer.
28624ef10fe0SLionel Landwerlin *
28634ef10fe0SLionel Landwerlin * This property is available in perf revision 5.
28644ef10fe0SLionel Landwerlin */
28654ef10fe0SLionel Landwerlin DRM_I915_PERF_PROP_POLL_OA_PERIOD,
28664ef10fe0SLionel Landwerlin 
2867c61d04c9SUmesh Nerlige Ramappa /**
2868c61d04c9SUmesh Nerlige Ramappa * Multiple engines may be mapped to the same OA unit. The OA unit is
2869c61d04c9SUmesh Nerlige Ramappa * identified by the class:instance of any engine mapped to it.
2870c61d04c9SUmesh Nerlige Ramappa *
2871c61d04c9SUmesh Nerlige Ramappa * This parameter specifies the engine class and must be passed along
2872c61d04c9SUmesh Nerlige Ramappa * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
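 *
 * As an illustration, a sketch of the (id, value) pair list used to open
 * an OA stream on a specific engine, per struct drm_i915_perf_open_param
 * below (metrics_set_id and oa_exponent are assumed to have been chosen
 * beforehand; error handling is omitted):
 *
 * .. code-block:: C
 *
 *     __u64 properties[] = {
 *             DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *             DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *             DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *             DRM_I915_PERF_PROP_OA_EXPONENT, oa_exponent,
 *             DRM_I915_PERF_PROP_OA_ENGINE_CLASS, I915_ENGINE_CLASS_RENDER,
 *             DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE, 0,
 *     };
 *     struct drm_i915_perf_open_param param = {
 *             .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *             .num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *             .properties_ptr = (uintptr_t)properties,
 *     };
 *     int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);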
2873c61d04c9SUmesh Nerlige Ramappa *
2874c61d04c9SUmesh Nerlige Ramappa * This property is available in perf revision 6.
2875c61d04c9SUmesh Nerlige Ramappa */
2876c61d04c9SUmesh Nerlige Ramappa DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
2877c61d04c9SUmesh Nerlige Ramappa 
2878c61d04c9SUmesh Nerlige Ramappa /**
2879c61d04c9SUmesh Nerlige Ramappa * This parameter specifies the engine instance and must be passed along
2880c61d04c9SUmesh Nerlige Ramappa * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
2881c61d04c9SUmesh Nerlige Ramappa *
2882c61d04c9SUmesh Nerlige Ramappa * This property is available in perf revision 6.
2883c61d04c9SUmesh Nerlige Ramappa */
2884c61d04c9SUmesh Nerlige Ramappa DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
2885c61d04c9SUmesh Nerlige Ramappa 
2886eec688e1SRobert Bragg DRM_I915_PERF_PROP_MAX /* non-ABI */
2887eec688e1SRobert Bragg };
2888eec688e1SRobert Bragg 
2889eec688e1SRobert Bragg struct drm_i915_perf_open_param {
2890eec688e1SRobert Bragg __u32 flags;
2891eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
2892eec688e1SRobert Bragg #define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
2893eec688e1SRobert Bragg #define I915_PERF_FLAG_DISABLED (1<<2)
2894eec688e1SRobert Bragg 
2895eec688e1SRobert Bragg /** The number of u64 (id, value) pairs */
2896eec688e1SRobert Bragg __u32 num_properties;
2897eec688e1SRobert Bragg 
2898eec688e1SRobert Bragg /**
2899eec688e1SRobert Bragg * Pointer to an array of u64 (id, value) pairs configuring the stream
2900eec688e1SRobert Bragg * to open.
2901eec688e1SRobert Bragg */
2902cd8bddc4SChris Wilson __u64 properties_ptr;
2903eec688e1SRobert Bragg };
2904eec688e1SRobert Bragg 
29052ef6a01fSMatthew Auld /*
2906d7965152SRobert Bragg * Enable data capture for a stream that was either opened in a disabled state
2907d7965152SRobert Bragg * via I915_PERF_FLAG_DISABLED or was later disabled via
2908d7965152SRobert Bragg * I915_PERF_IOCTL_DISABLE.
2909d7965152SRobert Bragg *
2910d7965152SRobert Bragg * It is intended to be cheaper to disable and enable a stream than it may be
2911d7965152SRobert Bragg * to close and re-open a stream with the same configuration.
2912d7965152SRobert Bragg *
2913d7965152SRobert Bragg * It's undefined whether any pending data for the stream will be lost.
2914b8d49f28SLionel Landwerlin *
2915b8d49f28SLionel Landwerlin * This ioctl is available in perf revision 1.
2916d7965152SRobert Bragg */
2917eec688e1SRobert Bragg #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
2918d7965152SRobert Bragg 
29192ef6a01fSMatthew Auld /*
2920d7965152SRobert Bragg * Disable data capture for a stream.
2921d7965152SRobert Bragg *
2922d7965152SRobert Bragg * It is an error to try to read a stream that is disabled.
2923b8d49f28SLionel Landwerlin *
2924b8d49f28SLionel Landwerlin * This ioctl is available in perf revision 1.
2925d7965152SRobert Bragg */
2926eec688e1SRobert Bragg #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
2927eec688e1SRobert Bragg 
29282ef6a01fSMatthew Auld /*
29297831e9a9SChris Wilson * Change the metrics_set captured by a stream.
29307831e9a9SChris Wilson *
29317831e9a9SChris Wilson * If the stream is bound to a specific context, the configuration change
29327831e9a9SChris Wilson * will be performed inline with that context such that it takes effect before
29337831e9a9SChris Wilson * the next execbuf submission.
29347831e9a9SChris Wilson *
29357831e9a9SChris Wilson * Returns the previously bound metrics set id, or a negative error code.
29367831e9a9SChris Wilson *
29377831e9a9SChris Wilson * This ioctl is available in perf revision 2.
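 *
 * A minimal usage sketch (stream_fd is assumed to be an open i915 perf
 * stream and new_metrics_id a metrics set id previously returned by
 * DRM_IOCTL_I915_PERF_ADD_CONFIG; the id is passed directly as the ioctl
 * argument):
 *
 * .. code-block:: C
 *
 *     int prev_id = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, new_metrics_id);
 *     // prev_id is the previously bound metrics set id, or negative on error.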
29387831e9a9SChris Wilson */
29397831e9a9SChris Wilson #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
29407831e9a9SChris Wilson 
29412ef6a01fSMatthew Auld /*
2942eec688e1SRobert Bragg * Common to all i915 perf records
2943eec688e1SRobert Bragg */
2944eec688e1SRobert Bragg struct drm_i915_perf_record_header {
2945eec688e1SRobert Bragg __u32 type;
2946eec688e1SRobert Bragg __u16 pad;
2947eec688e1SRobert Bragg __u16 size;
2948eec688e1SRobert Bragg };
2949eec688e1SRobert Bragg 
2950eec688e1SRobert Bragg enum drm_i915_perf_record_type {
2951eec688e1SRobert Bragg 
2952eec688e1SRobert Bragg /**
2953eec688e1SRobert Bragg * Samples are the workhorse record type whose contents are extensible
2954eec688e1SRobert Bragg * and defined when opening an i915 perf stream based on the given
2955eec688e1SRobert Bragg * properties.
2956eec688e1SRobert Bragg *
2957eec688e1SRobert Bragg * Boolean properties following the naming convention
2958eec688e1SRobert Bragg * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2959eec688e1SRobert Bragg * every sample.
2960eec688e1SRobert Bragg *
2961eec688e1SRobert Bragg * The order of these sample properties given by userspace has no
2962d7965152SRobert Bragg * effect on the ordering of data within a sample. The order is
2963eec688e1SRobert Bragg * documented here.
2964eec688e1SRobert Bragg *
2965eec688e1SRobert Bragg * struct {
2966eec688e1SRobert Bragg * struct drm_i915_perf_record_header header;
2967eec688e1SRobert Bragg *
2968d7965152SRobert Bragg * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2969eec688e1SRobert Bragg * };
2970eec688e1SRobert Bragg */
2971eec688e1SRobert Bragg DRM_I915_PERF_RECORD_SAMPLE = 1,
2972eec688e1SRobert Bragg 
2973d7965152SRobert Bragg /*
2974d7965152SRobert Bragg * Indicates that one or more OA reports were not written by the
2975d7965152SRobert Bragg * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2976d7965152SRobert Bragg * command collides with periodic sampling - which would be more likely
2977d7965152SRobert Bragg * at higher sampling frequencies.
2978d7965152SRobert Bragg */
2979d7965152SRobert Bragg DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2980d7965152SRobert Bragg 
2981d7965152SRobert Bragg /**
2982d7965152SRobert Bragg * An error occurred that resulted in all pending OA reports being lost.
2983d7965152SRobert Bragg */
2984d7965152SRobert Bragg DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2985d7965152SRobert Bragg 
2986eec688e1SRobert Bragg DRM_I915_PERF_RECORD_MAX /* non-ABI */
2987eec688e1SRobert Bragg };
2988eec688e1SRobert Bragg 
2989a2e54026SMatt Roper /**
2990a2e54026SMatt Roper * struct drm_i915_perf_oa_config
2991a2e54026SMatt Roper *
2992f89823c2SLionel Landwerlin * Structure to upload perf dynamic configuration into the kernel.
2993f89823c2SLionel Landwerlin */
2994f89823c2SLionel Landwerlin struct drm_i915_perf_oa_config {
2995a2e54026SMatt Roper /**
2996a2e54026SMatt Roper * @uuid:
2997a2e54026SMatt Roper *
2998a2e54026SMatt Roper * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
2999a2e54026SMatt Roper */
3000f89823c2SLionel Landwerlin char uuid[36];
3001f89823c2SLionel Landwerlin 
3002a2e54026SMatt Roper /**
3003a2e54026SMatt Roper * @n_mux_regs:
3004a2e54026SMatt Roper *
3005a2e54026SMatt Roper * Number of mux regs in &mux_regs_ptr.
3006a2e54026SMatt Roper */
3007f89823c2SLionel Landwerlin __u32 n_mux_regs;
3008a2e54026SMatt Roper 
3009a2e54026SMatt Roper /**
3010a2e54026SMatt Roper * @n_boolean_regs:
3011a2e54026SMatt Roper *
3012a2e54026SMatt Roper * Number of boolean regs in &boolean_regs_ptr.
3013a2e54026SMatt Roper */
3014f89823c2SLionel Landwerlin __u32 n_boolean_regs;
3015a2e54026SMatt Roper 
3016a2e54026SMatt Roper /**
3017a2e54026SMatt Roper * @n_flex_regs:
3018a2e54026SMatt Roper *
3019a2e54026SMatt Roper * Number of flex regs in &flex_regs_ptr.
3020a2e54026SMatt Roper */
3021f89823c2SLionel Landwerlin __u32 n_flex_regs;
3022f89823c2SLionel Landwerlin 
3023a2e54026SMatt Roper /**
3024a2e54026SMatt Roper * @mux_regs_ptr:
3025a2e54026SMatt Roper *
3026a2e54026SMatt Roper * Pointer to tuples of u32 values (register address, value) for mux
3027a2e54026SMatt Roper * registers. Expected length of buffer is (2 * sizeof(u32) *
3028a2e54026SMatt Roper * &n_mux_regs).
3029ee427e25SLionel Landwerlin */
303017ad4fddSChris Wilson __u64 mux_regs_ptr;
3031a2e54026SMatt Roper 
3032a2e54026SMatt Roper /**
3033a2e54026SMatt Roper * @boolean_regs_ptr:
3034a2e54026SMatt Roper *
3035a2e54026SMatt Roper * Pointer to tuples of u32 values (register address, value) for boolean
3036a2e54026SMatt Roper * registers. Expected length of buffer is (2 * sizeof(u32) *
3037a2e54026SMatt Roper * &n_boolean_regs).
3038a2e54026SMatt Roper */
303917ad4fddSChris Wilson __u64 boolean_regs_ptr;
3040a2e54026SMatt Roper 
3041a2e54026SMatt Roper /**
3042a2e54026SMatt Roper * @flex_regs_ptr:
3043a2e54026SMatt Roper *
3044a2e54026SMatt Roper * Pointer to tuples of u32 values (register address, value) for flex
3045a2e54026SMatt Roper * registers. Expected length of buffer is (2 * sizeof(u32) *
3046a2e54026SMatt Roper * &n_flex_regs).
3047a2e54026SMatt Roper */
304817ad4fddSChris Wilson __u64 flex_regs_ptr;
3049f89823c2SLionel Landwerlin };
3050f89823c2SLionel Landwerlin 
3051e3bdccafSMatthew Auld /**
3052e3bdccafSMatthew Auld * struct drm_i915_query_item - An individual query for the kernel to process.
3053e3bdccafSMatthew Auld *
3054e3bdccafSMatthew Auld * The behaviour is determined by the @query_id. Note that exactly what
3055e3bdccafSMatthew Auld * @data_ptr is also depends on the specific @query_id.
3056e3bdccafSMatthew Auld */
3057a446ae2cSLionel Landwerlin struct drm_i915_query_item {
30581c671ad7SMatt Roper /**
30591c671ad7SMatt Roper * @query_id:
30601c671ad7SMatt Roper *
30611c671ad7SMatt Roper * The id for this query.
Currently accepted query IDs are:
30621c671ad7SMatt Roper * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
30631c671ad7SMatt Roper * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
30641c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
30651c671ad7SMatt Roper * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
30661c671ad7SMatt Roper * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
3067c94fde8fSMatt Atwood * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
3068b1123648STvrtko Ursulin * - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
30691c671ad7SMatt Roper */
3070a446ae2cSLionel Landwerlin __u64 query_id;
3071c822e059SLionel Landwerlin #define DRM_I915_QUERY_TOPOLOGY_INFO 1
3072c5d3e39cSTvrtko Ursulin #define DRM_I915_QUERY_ENGINE_INFO 2
30734f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG 3
307471021729SAbdiel Janulgue #define DRM_I915_QUERY_MEMORY_REGIONS 4
307578e1fb31SRodrigo Vivi #define DRM_I915_QUERY_HWCONFIG_BLOB 5
3076c94fde8fSMatt Atwood #define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
3077b1123648STvrtko Ursulin #define DRM_I915_QUERY_GUC_SUBMISSION_VERSION 7
3078be03564bSChris Wilson /* Must be kept compact -- no holes and well documented */
3079a446ae2cSLionel Landwerlin 
3080e3bdccafSMatthew Auld /**
3081e3bdccafSMatthew Auld * @length:
3082e3bdccafSMatthew Auld *
3083a446ae2cSLionel Landwerlin * When set to zero by userspace, this is filled with the size of the
3084e3bdccafSMatthew Auld * data to be written at the @data_ptr pointer. The kernel sets this
3085a446ae2cSLionel Landwerlin * value to a negative value to signal an error on a particular query
3086a446ae2cSLionel Landwerlin * item.
3087a446ae2cSLionel Landwerlin */
3088a446ae2cSLionel Landwerlin __s32 length;
3089a446ae2cSLionel Landwerlin 
3090e3bdccafSMatthew Auld /**
3091e3bdccafSMatthew Auld * @flags:
3092e3bdccafSMatthew Auld *
30931c671ad7SMatt Roper * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
30944f6ccc74SLionel Landwerlin *
30951c671ad7SMatt Roper * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
30964f6ccc74SLionel Landwerlin * following:
3097e3bdccafSMatthew Auld *
30981c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG_LIST
30991c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
31001c671ad7SMatt Roper * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
3101c94fde8fSMatt Atwood *
3102c94fde8fSMatt Atwood * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, must contain
3103c94fde8fSMatt Atwood * a struct i915_engine_class_instance that references a render engine.
3104a446ae2cSLionel Landwerlin */
3105a446ae2cSLionel Landwerlin __u32 flags;
31064f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_LIST 1
31074f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
31084f6ccc74SLionel Landwerlin #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
3109a446ae2cSLionel Landwerlin 
3110e3bdccafSMatthew Auld /**
3111e3bdccafSMatthew Auld * @data_ptr:
3112e3bdccafSMatthew Auld *
3113e3bdccafSMatthew Auld * Data will be written at the location pointed by @data_ptr when the
3114e3bdccafSMatthew Auld * value of @length matches the length of the data to be written by the
3115a446ae2cSLionel Landwerlin * kernel.
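 *
 * A minimal two-pass sketch of the size-then-data pattern (error
 * handling elided; fd is an open i915 DRM file descriptor):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // fills item.length
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // fills the blob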
3116a446ae2cSLionel Landwerlin */
3117a446ae2cSLionel Landwerlin __u64 data_ptr;
3118a446ae2cSLionel Landwerlin };
3119a446ae2cSLionel Landwerlin 
3120e3bdccafSMatthew Auld /**
3121e3bdccafSMatthew Auld * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
3122e3bdccafSMatthew Auld * kernel to fill out.
3123e3bdccafSMatthew Auld *
3124e3bdccafSMatthew Auld * Note that this is generally a two-step process for each struct
3125e3bdccafSMatthew Auld * drm_i915_query_item in the array:
3126e3bdccafSMatthew Auld *
3127e3bdccafSMatthew Auld * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
3128e3bdccafSMatthew Auld * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
3129e3bdccafSMatthew Auld * kernel will then fill in the size, in bytes, which tells userspace how much
3130e3bdccafSMatthew Auld * memory it needs to allocate for the blob (say, for an array of properties).
3131e3bdccafSMatthew Auld *
3132e3bdccafSMatthew Auld * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
3133e3bdccafSMatthew Auld * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
3134e3bdccafSMatthew Auld * the &drm_i915_query_item.length should still be the same as what the
3135e3bdccafSMatthew Auld * kernel previously set. At this point the kernel can fill in the blob.
3136e3bdccafSMatthew Auld *
3137e3bdccafSMatthew Auld * Note that for some query items it can make sense for userspace to just pass
3138e3bdccafSMatthew Auld * in a buffer/blob equal to or larger than the required size. In this case only
3139e3bdccafSMatthew Auld * a single ioctl call is needed. For some smaller query items this can work
3140e3bdccafSMatthew Auld * quite well.
3141e3bdccafSMatthew Auld *
3142e3bdccafSMatthew Auld */
3143a446ae2cSLionel Landwerlin struct drm_i915_query {
3144e3bdccafSMatthew Auld /** @num_items: The number of elements in the @items_ptr array */
3145a446ae2cSLionel Landwerlin __u32 num_items;
3146a446ae2cSLionel Landwerlin 
3147e3bdccafSMatthew Auld /**
3148e3bdccafSMatthew Auld * @flags: Unused for now. Must be cleared to zero.
3149a446ae2cSLionel Landwerlin */
3150a446ae2cSLionel Landwerlin __u32 flags;
3151a446ae2cSLionel Landwerlin 
3152e3bdccafSMatthew Auld /**
3153e3bdccafSMatthew Auld * @items_ptr:
3154e3bdccafSMatthew Auld *
3155e3bdccafSMatthew Auld * Pointer to an array of struct drm_i915_query_item. The number of
3156e3bdccafSMatthew Auld * array elements is @num_items.
3157a446ae2cSLionel Landwerlin */
3158a446ae2cSLionel Landwerlin __u64 items_ptr;
3159a446ae2cSLionel Landwerlin };
3160a446ae2cSLionel Landwerlin 
3161462ac1cdSMatt Roper /**
3162462ac1cdSMatt Roper * struct drm_i915_query_topology_info
3163c822e059SLionel Landwerlin *
3164462ac1cdSMatt Roper * Describes slice/subslice/EU information queried by
3165462ac1cdSMatt Roper * %DRM_I915_QUERY_TOPOLOGY_INFO
3166c822e059SLionel Landwerlin */
3167c822e059SLionel Landwerlin struct drm_i915_query_topology_info {
3168462ac1cdSMatt Roper /**
3169462ac1cdSMatt Roper * @flags:
3170462ac1cdSMatt Roper *
3171c822e059SLionel Landwerlin * Unused for now. Must be cleared to zero.
3172c822e059SLionel Landwerlin */
3173c822e059SLionel Landwerlin __u16 flags;
3174c822e059SLionel Landwerlin 
3175462ac1cdSMatt Roper /**
3176462ac1cdSMatt Roper * @max_slices:
3177462ac1cdSMatt Roper *
3178462ac1cdSMatt Roper * The number of bits used to express the slice mask.
3179462ac1cdSMatt Roper */
3180c822e059SLionel Landwerlin __u16 max_slices;
3181462ac1cdSMatt Roper 
3182462ac1cdSMatt Roper /**
3183462ac1cdSMatt Roper * @max_subslices:
3184462ac1cdSMatt Roper *
3185462ac1cdSMatt Roper * The number of bits used to express the subslice mask.
3186462ac1cdSMatt Roper */
3187c822e059SLionel Landwerlin __u16 max_subslices;
3188462ac1cdSMatt Roper 
3189462ac1cdSMatt Roper /**
3190462ac1cdSMatt Roper * @max_eus_per_subslice:
3191462ac1cdSMatt Roper *
3192462ac1cdSMatt Roper * The number of bits in the EU mask that correspond to a single
3193462ac1cdSMatt Roper * subslice's EUs.
3194462ac1cdSMatt Roper */
3195c822e059SLionel Landwerlin __u16 max_eus_per_subslice;
3196c822e059SLionel Landwerlin 
3197462ac1cdSMatt Roper /**
3198462ac1cdSMatt Roper * @subslice_offset:
3199462ac1cdSMatt Roper *
3200c822e059SLionel Landwerlin * Offset in data[] at which the subslice masks are stored.
3201c822e059SLionel Landwerlin */
3202c822e059SLionel Landwerlin __u16 subslice_offset;
3203c822e059SLionel Landwerlin 
3204462ac1cdSMatt Roper /**
3205462ac1cdSMatt Roper * @subslice_stride:
3206462ac1cdSMatt Roper *
3207c822e059SLionel Landwerlin * Stride at which each of the subslice masks for each slice are
3208c822e059SLionel Landwerlin * stored.
3209c822e059SLionel Landwerlin */
3210c822e059SLionel Landwerlin __u16 subslice_stride;
3211c822e059SLionel Landwerlin 
3212462ac1cdSMatt Roper /**
3213462ac1cdSMatt Roper * @eu_offset:
3214462ac1cdSMatt Roper *
3215c822e059SLionel Landwerlin * Offset in data[] at which the EU masks are stored.
3216c822e059SLionel Landwerlin */
3217c822e059SLionel Landwerlin __u16 eu_offset;
3218c822e059SLionel Landwerlin 
3219462ac1cdSMatt Roper /**
3220462ac1cdSMatt Roper * @eu_stride:
3221462ac1cdSMatt Roper *
3222c822e059SLionel Landwerlin * Stride at which each of the EU masks for each subslice are stored.
3223c822e059SLionel Landwerlin */
3224c822e059SLionel Landwerlin __u16 eu_stride;
3225c822e059SLionel Landwerlin 
3226462ac1cdSMatt Roper /**
3227462ac1cdSMatt Roper * @data:
3228462ac1cdSMatt Roper *
3229462ac1cdSMatt Roper * Contains 3 pieces of information:
3230462ac1cdSMatt Roper *
3231462ac1cdSMatt Roper * - The slice mask with one bit per slice telling whether a slice is
3232462ac1cdSMatt Roper * available. The availability of slice X can be queried with the
3233462ac1cdSMatt Roper * following formula:
3234462ac1cdSMatt Roper *
3235462ac1cdSMatt Roper * .. code:: c
3236462ac1cdSMatt Roper *
3237462ac1cdSMatt Roper * (data[X / 8] >> (X % 8)) & 1
3238462ac1cdSMatt Roper *
3239462ac1cdSMatt Roper * Starting with Xe_HP platforms, Intel hardware no longer has
3240462ac1cdSMatt Roper * traditional slices so i915 will always report a single slice
3241462ac1cdSMatt Roper * (hardcoded slicemask = 0x1) which contains all of the platform's
3242462ac1cdSMatt Roper * subslices. I.e., the mask here does not reflect any of the newer
3243462ac1cdSMatt Roper * hardware concepts such as "gslices" or "cslices" since userspace
3244462ac1cdSMatt Roper * is capable of inferring those from the subslice mask.
3245462ac1cdSMatt Roper *
3246462ac1cdSMatt Roper * - The subslice mask for each slice with one bit per subslice telling
3247462ac1cdSMatt Roper * whether a subslice is available. Starting with Gen12 we use the
3248462ac1cdSMatt Roper * term "subslice" to refer to what the hardware documentation
3249462ac1cdSMatt Roper * describes as a "dual-subslice".
The availability of subslice Y
3250462ac1cdSMatt Roper * in slice X can be queried with the following formula:
3251462ac1cdSMatt Roper *
3252462ac1cdSMatt Roper * .. code:: c
3253462ac1cdSMatt Roper *
3254462ac1cdSMatt Roper * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
3255462ac1cdSMatt Roper *
3256462ac1cdSMatt Roper * - The EU mask for each subslice in each slice, with one bit per EU
3257462ac1cdSMatt Roper * telling whether an EU is available. The availability of EU Z in
3258462ac1cdSMatt Roper * subslice Y in slice X can be queried with the following formula:
3259462ac1cdSMatt Roper *
3260462ac1cdSMatt Roper * .. code:: c
3261462ac1cdSMatt Roper *
3262462ac1cdSMatt Roper * (data[eu_offset +
3263462ac1cdSMatt Roper * (X * max_subslices + Y) * eu_stride +
3264462ac1cdSMatt Roper * Z / 8
3265462ac1cdSMatt Roper * ] >> (Z % 8)) & 1
3266462ac1cdSMatt Roper */
3267c822e059SLionel Landwerlin __u8 data[];
3268c822e059SLionel Landwerlin };
3269c822e059SLionel Landwerlin 
3270c5d3e39cSTvrtko Ursulin /**
327157772953STvrtko Ursulin * DOC: Engine Discovery uAPI
327257772953STvrtko Ursulin *
327357772953STvrtko Ursulin * Engine discovery uAPI is a way of enumerating physical engines present in a
327457772953STvrtko Ursulin * GPU associated with an open i915 DRM file descriptor. This supersedes the old
327557772953STvrtko Ursulin * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
327657772953STvrtko Ursulin * `I915_PARAM_HAS_BLT`.
327757772953STvrtko Ursulin *
327857772953STvrtko Ursulin * The need for this interface came starting with Icelake and newer GPUs, which
327957772953STvrtko Ursulin * started to establish a pattern of having multiple engines of a same class,
328057772953STvrtko Ursulin * where not all instances were always completely functionally equivalent.
328157772953STvrtko Ursulin *
328257772953STvrtko Ursulin * Entry point for this uAPI is `DRM_IOCTL_I915_QUERY` with the
328357772953STvrtko Ursulin * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
328457772953STvrtko Ursulin *
328557772953STvrtko Ursulin * Example for getting the list of engines:
328657772953STvrtko Ursulin *
328757772953STvrtko Ursulin * .. code-block:: C
328857772953STvrtko Ursulin *
328957772953STvrtko Ursulin * struct drm_i915_query_engine_info *info;
329057772953STvrtko Ursulin * struct drm_i915_query_item item = {
329157772953STvrtko Ursulin * .query_id = DRM_I915_QUERY_ENGINE_INFO,
329257772953STvrtko Ursulin * };
329357772953STvrtko Ursulin * struct drm_i915_query query = {
329457772953STvrtko Ursulin * .num_items = 1,
329557772953STvrtko Ursulin * .items_ptr = (uintptr_t)&item,
329657772953STvrtko Ursulin * };
329757772953STvrtko Ursulin * int err, i;
329857772953STvrtko Ursulin *
329957772953STvrtko Ursulin * // First query the size of the blob we need, this needs to be large
330057772953STvrtko Ursulin * // enough to hold our array of engines. The kernel will fill out the
330157772953STvrtko Ursulin * // item.length for us, which is the number of bytes we need.
330257772953STvrtko Ursulin * //
330357772953STvrtko Ursulin * // Alternatively a large buffer can be allocated straightaway enabling
330457772953STvrtko Ursulin * // querying in one pass, in which case item.length should contain the
330557772953STvrtko Ursulin * // length of the provided buffer.
330657772953STvrtko Ursulin * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
330757772953STvrtko Ursulin * if (err) ...
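 * // On success item.length now holds the required size in bytes; a
 * // negative item.length signals an error for this query item.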
330857772953STvrtko Ursulin *
330957772953STvrtko Ursulin * info = calloc(1, item.length);
331057772953STvrtko Ursulin * // Now that we allocated the required number of bytes, we call the ioctl
331157772953STvrtko Ursulin * // again, this time with the data_ptr pointing to our newly allocated
331257772953STvrtko Ursulin * // blob, which the kernel can then populate with info on all engines.
3313afa5cf31SRandy Dunlap * item.data_ptr = (uintptr_t)info;
331457772953STvrtko Ursulin *
331557772953STvrtko Ursulin * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
331657772953STvrtko Ursulin * if (err) ...
331757772953STvrtko Ursulin *
331857772953STvrtko Ursulin * // We can now access each engine in the array
331957772953STvrtko Ursulin * for (i = 0; i < info->num_engines; i++) {
332057772953STvrtko Ursulin * struct drm_i915_engine_info einfo = info->engines[i];
332157772953STvrtko Ursulin * u16 class = einfo.engine.class;
332257772953STvrtko Ursulin * u16 instance = einfo.engine.instance;
332357772953STvrtko Ursulin * ....
332457772953STvrtko Ursulin * }
332557772953STvrtko Ursulin *
332657772953STvrtko Ursulin * free(info);
332757772953STvrtko Ursulin *
332857772953STvrtko Ursulin * Each of the enumerated engines, apart from being defined by its class and
332957772953STvrtko Ursulin * instance (see `struct i915_engine_class_instance`), can also have flags and
333057772953STvrtko Ursulin * capabilities defined as documented in i915_drm.h.
333157772953STvrtko Ursulin *
333257772953STvrtko Ursulin * For instance, video engines which support HEVC encoding will have the
333357772953STvrtko Ursulin * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
333457772953STvrtko Ursulin *
333557772953STvrtko Ursulin * Engine discovery only fully comes into its own when combined with the new way
333657772953STvrtko Ursulin * of addressing engines when submitting batch buffers using contexts with
333757772953STvrtko Ursulin * engine maps configured.
333857772953STvrtko Ursulin */
333957772953STvrtko Ursulin 
334057772953STvrtko Ursulin /**
3341c5d3e39cSTvrtko Ursulin * struct drm_i915_engine_info
3342c5d3e39cSTvrtko Ursulin *
3343afa5cf31SRandy Dunlap * Describes one engine and its capabilities as known to the driver.
3344c5d3e39cSTvrtko Ursulin */
3345c5d3e39cSTvrtko Ursulin struct drm_i915_engine_info {
33462ef6a01fSMatthew Auld /** @engine: Engine class and instance. */
3347c5d3e39cSTvrtko Ursulin struct i915_engine_class_instance engine;
3348c5d3e39cSTvrtko Ursulin 
33492ef6a01fSMatthew Auld /** @rsvd0: Reserved field. */
3350c5d3e39cSTvrtko Ursulin __u32 rsvd0;
3351c5d3e39cSTvrtko Ursulin 
33522ef6a01fSMatthew Auld /** @flags: Engine flags. */
3353c5d3e39cSTvrtko Ursulin __u64 flags;
33549409eb35SMatthew Brost #define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
3355c5d3e39cSTvrtko Ursulin 
33562ef6a01fSMatthew Auld /** @capabilities: Capabilities of this engine. */
3357c5d3e39cSTvrtko Ursulin __u64 capabilities;
3358c5d3e39cSTvrtko Ursulin #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
3359c5d3e39cSTvrtko Ursulin #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
3360c5d3e39cSTvrtko Ursulin 
33619409eb35SMatthew Brost /** @logical_instance: Logical instance of engine */
33629409eb35SMatthew Brost __u16 logical_instance;
33639409eb35SMatthew Brost 
33642ef6a01fSMatthew Auld /** @rsvd1: Reserved fields. */
33659409eb35SMatthew Brost __u16 rsvd1[3];
33669409eb35SMatthew Brost /** @rsvd2: Reserved fields.
*/
33679409eb35SMatthew Brost __u64 rsvd2[3];
3368c5d3e39cSTvrtko Ursulin };
3369c5d3e39cSTvrtko Ursulin 
3370c5d3e39cSTvrtko Ursulin /**
3371c5d3e39cSTvrtko Ursulin * struct drm_i915_query_engine_info
3372c5d3e39cSTvrtko Ursulin *
3373c5d3e39cSTvrtko Ursulin * Engine info query enumerates all engines known to the driver by filling in
3374c5d3e39cSTvrtko Ursulin * an array of struct drm_i915_engine_info structures.
3375c5d3e39cSTvrtko Ursulin */
3376c5d3e39cSTvrtko Ursulin struct drm_i915_query_engine_info {
33772ef6a01fSMatthew Auld /** @num_engines: Number of struct drm_i915_engine_info structs following. */
3378c5d3e39cSTvrtko Ursulin __u32 num_engines;
3379c5d3e39cSTvrtko Ursulin 
33802ef6a01fSMatthew Auld /** @rsvd: MBZ */
3381c5d3e39cSTvrtko Ursulin __u32 rsvd[3];
3382c5d3e39cSTvrtko Ursulin 
33832ef6a01fSMatthew Auld /** @engines: Marker for drm_i915_engine_info structures. */
3384c5d3e39cSTvrtko Ursulin struct drm_i915_engine_info engines[];
3385c5d3e39cSTvrtko Ursulin };
3386c5d3e39cSTvrtko Ursulin 
3387a2e54026SMatt Roper /**
3388a2e54026SMatt Roper * struct drm_i915_query_perf_config
3389a2e54026SMatt Roper *
3390c94fde8fSMatt Atwood * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
3391c94fde8fSMatt Atwood * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
33924f6ccc74SLionel Landwerlin */
33934f6ccc74SLionel Landwerlin struct drm_i915_query_perf_config {
33944f6ccc74SLionel Landwerlin union {
3395a2e54026SMatt Roper /**
3396a2e54026SMatt Roper * @n_configs:
3397a2e54026SMatt Roper *
3398a2e54026SMatt Roper * When &drm_i915_query_item.flags ==
3399a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
3400a2e54026SMatt Roper * the number of configurations available.
34014f6ccc74SLionel Landwerlin */
34024f6ccc74SLionel Landwerlin __u64 n_configs;
34034f6ccc74SLionel Landwerlin 
3404a2e54026SMatt Roper /**
3405a2e54026SMatt Roper * @config:
3406a2e54026SMatt Roper *
3407a2e54026SMatt Roper * When &drm_i915_query_item.flags ==
3408a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
3409a2e54026SMatt Roper * value in this field as configuration identifier to decide
3410a2e54026SMatt Roper * what data to write into config_ptr.
34114f6ccc74SLionel Landwerlin */
34124f6ccc74SLionel Landwerlin __u64 config;
34134f6ccc74SLionel Landwerlin 
3414a2e54026SMatt Roper /**
3415a2e54026SMatt Roper * @uuid:
3416a2e54026SMatt Roper *
3417a2e54026SMatt Roper * When &drm_i915_query_item.flags ==
3418a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
3419a2e54026SMatt Roper * value in this field as configuration identifier to decide
3420a2e54026SMatt Roper * what data to write into config_ptr.
34214f6ccc74SLionel Landwerlin *
34224f6ccc74SLionel Landwerlin * String formatted like "%08x-%04x-%04x-%04x-%012x"
34234f6ccc74SLionel Landwerlin */
34244f6ccc74SLionel Landwerlin char uuid[36];
34254f6ccc74SLionel Landwerlin };
34264f6ccc74SLionel Landwerlin 
3427a2e54026SMatt Roper /**
3428a2e54026SMatt Roper * @flags:
3429a2e54026SMatt Roper *
34304f6ccc74SLionel Landwerlin * Unused for now. Must be cleared to zero.
34314f6ccc74SLionel Landwerlin */
34324f6ccc74SLionel Landwerlin __u32 flags;
34334f6ccc74SLionel Landwerlin 
3434a2e54026SMatt Roper /**
3435a2e54026SMatt Roper * @data:
34364f6ccc74SLionel Landwerlin *
3437a2e54026SMatt Roper * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
3438a2e54026SMatt Roper * i915 will write an array of __u64 of configuration identifiers.
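 *
 * A sketch of listing those identifiers with the usual two-pass sizing
 * (assumes item/query are set up as for other query items; error
 * handling elided):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_perf_config *qp;
 *	__u64 *ids;
 *	__u64 i;
 *
 *	item.query_id = DRM_I915_QUERY_PERF_CONFIG;
 *	item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); // fills item.length
 *
 *	qp = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)qp;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *
 *	ids = (__u64 *)qp->data;
 *	for (i = 0; i < qp->n_configs; i++)
 *		... // each ids[i] names one configuration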
3439a2e54026SMatt Roper *
3440a2e54026SMatt Roper * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
3441a2e54026SMatt Roper * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct drm_i915_perf_oa_config.
3442a2e54026SMatt Roper * If the following fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
3443a2e54026SMatt Roper * write into the associated pointers the values submitted when the
34444f6ccc74SLionel Landwerlin * configuration was created:
34454f6ccc74SLionel Landwerlin *
3446a2e54026SMatt Roper * - &drm_i915_perf_oa_config.n_mux_regs
3447a2e54026SMatt Roper * - &drm_i915_perf_oa_config.n_boolean_regs
3448a2e54026SMatt Roper * - &drm_i915_perf_oa_config.n_flex_regs
34494f6ccc74SLionel Landwerlin */
34504f6ccc74SLionel Landwerlin __u8 data[];
34514f6ccc74SLionel Landwerlin };
34524f6ccc74SLionel Landwerlin 
345371021729SAbdiel Janulgue /**
345471021729SAbdiel Janulgue * enum drm_i915_gem_memory_class - Supported memory classes
345571021729SAbdiel Janulgue */
345671021729SAbdiel Janulgue enum drm_i915_gem_memory_class {
345771021729SAbdiel Janulgue /** @I915_MEMORY_CLASS_SYSTEM: System memory */
345871021729SAbdiel Janulgue I915_MEMORY_CLASS_SYSTEM = 0,
345971021729SAbdiel Janulgue /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
346071021729SAbdiel Janulgue I915_MEMORY_CLASS_DEVICE,
346171021729SAbdiel Janulgue };
346271021729SAbdiel Janulgue 
346371021729SAbdiel Janulgue /**
346471021729SAbdiel Janulgue * struct drm_i915_gem_memory_class_instance - Identify particular memory region
346571021729SAbdiel Janulgue */
346671021729SAbdiel Janulgue struct drm_i915_gem_memory_class_instance {
346771021729SAbdiel Janulgue /** @memory_class: See enum drm_i915_gem_memory_class */
346871021729SAbdiel Janulgue __u16 memory_class;
346971021729SAbdiel Janulgue 
347071021729SAbdiel Janulgue /** @memory_instance: Which instance */
347171021729SAbdiel Janulgue __u16 memory_instance;
347271021729SAbdiel Janulgue };
347371021729SAbdiel Janulgue 
347471021729SAbdiel Janulgue /**
347571021729SAbdiel Janulgue * struct drm_i915_memory_region_info - Describes one region as known to the
347671021729SAbdiel Janulgue * driver.
347771021729SAbdiel Janulgue *
347871021729SAbdiel Janulgue * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
347971021729SAbdiel Janulgue * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
348071021729SAbdiel Janulgue * at &drm_i915_query_item.query_id.
348171021729SAbdiel Janulgue */
348271021729SAbdiel Janulgue struct drm_i915_memory_region_info {
348371021729SAbdiel Janulgue /** @region: The class:instance pair encoding */
348471021729SAbdiel Janulgue struct drm_i915_gem_memory_class_instance region;
348571021729SAbdiel Janulgue 
348671021729SAbdiel Janulgue /** @rsvd0: MBZ */
348771021729SAbdiel Janulgue __u32 rsvd0;
348871021729SAbdiel Janulgue 
34893f4309cbSMatthew Auld /**
34903f4309cbSMatthew Auld * @probed_size: Memory probed by the driver
34913f4309cbSMatthew Auld *
34923f4309cbSMatthew Auld * Note that it should not be possible to ever encounter a zero value
34933f4309cbSMatthew Auld * here; also note that no current region type will ever return -1 here.
34943f4309cbSMatthew Auld * Although for future region types, this might be a possibility. The
34953f4309cbSMatthew Auld * same applies to the other size fields.
34963f4309cbSMatthew Auld */
349771021729SAbdiel Janulgue __u64 probed_size;
349871021729SAbdiel Janulgue 
3499141f733bSMatthew Auld /**
3500141f733bSMatthew Auld * @unallocated_size: Estimate of memory remaining
3501141f733bSMatthew Auld *
3502141f733bSMatthew Auld * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
3503141f733bSMatthew Auld * Without this (or if this is an older kernel) the value here will
3504141f733bSMatthew Auld * always equal the @probed_size. Note this is only currently tracked
3505141f733bSMatthew Auld * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
3506141f733bSMatthew Auld * will always equal the @probed_size).
3507141f733bSMatthew Auld */
350871021729SAbdiel Janulgue __u64 unallocated_size;
350971021729SAbdiel Janulgue 
35103f4309cbSMatthew Auld union {
351171021729SAbdiel Janulgue /** @rsvd1: MBZ */
351271021729SAbdiel Janulgue __u64 rsvd1[8];
35133f4309cbSMatthew Auld struct {
35143f4309cbSMatthew Auld /**
35153f4309cbSMatthew Auld * @probed_cpu_visible_size: Memory probed by the driver
35163f4309cbSMatthew Auld * that is CPU accessible.
35173f4309cbSMatthew Auld *
35183f4309cbSMatthew Auld * This will always be <= @probed_size, and the
35193f4309cbSMatthew Auld * remainder (if there is any) will not be CPU
35203f4309cbSMatthew Auld * accessible.
35213f4309cbSMatthew Auld *
35223f4309cbSMatthew Auld * On systems without small BAR, the @probed_size will
35233f4309cbSMatthew Auld * always equal the @probed_cpu_visible_size, since all
35243f4309cbSMatthew Auld * of it will be CPU accessible.
35253f4309cbSMatthew Auld *
35263f4309cbSMatthew Auld * Note this is only tracked for
35273f4309cbSMatthew Auld * I915_MEMORY_CLASS_DEVICE regions (for other types the
35283f4309cbSMatthew Auld * value here will always equal the @probed_size).
35293f4309cbSMatthew Auld *
35303f4309cbSMatthew Auld * Note that if the value returned here is zero, then
35313f4309cbSMatthew Auld * this must be an old kernel which lacks the relevant
35323f4309cbSMatthew Auld * small-bar uAPI support (including
35333f4309cbSMatthew Auld * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
35343f4309cbSMatthew Auld * such systems we should never actually end up with a
35353f4309cbSMatthew Auld * small BAR configuration, assuming we are able to load
35363f4309cbSMatthew Auld * the kernel module. Hence it should be safe to treat
35373f4309cbSMatthew Auld * this the same as when @probed_cpu_visible_size ==
35383f4309cbSMatthew Auld * @probed_size.
35393f4309cbSMatthew Auld */
35403f4309cbSMatthew Auld __u64 probed_cpu_visible_size;
3541141f733bSMatthew Auld 
3542141f733bSMatthew Auld /**
3543141f733bSMatthew Auld * @unallocated_cpu_visible_size: Estimate of CPU
3544141f733bSMatthew Auld * visible memory remaining.
3545141f733bSMatthew Auld *
3546141f733bSMatthew Auld * Note this is only tracked for
3547141f733bSMatthew Auld * I915_MEMORY_CLASS_DEVICE regions (for other types the
3548141f733bSMatthew Auld * value here will always equal the
3549141f733bSMatthew Auld * @probed_cpu_visible_size).
3550141f733bSMatthew Auld *
3551141f733bSMatthew Auld * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
3552141f733bSMatthew Auld * accounting. Without this the value here will always
3553141f733bSMatthew Auld * equal the @probed_cpu_visible_size.
3557141f733bSMatthew Auld *
3558141f733bSMatthew Auld * If this is an older kernel the value here will be
3559141f733bSMatthew Auld * zero, see also @probed_cpu_visible_size.
3560141f733bSMatthew Auld */
3561141f733bSMatthew Auld __u64 unallocated_cpu_visible_size;
35623f4309cbSMatthew Auld };
35633f4309cbSMatthew Auld };
356471021729SAbdiel Janulgue };
356571021729SAbdiel Janulgue 
356671021729SAbdiel Janulgue /**
356771021729SAbdiel Janulgue * struct drm_i915_query_memory_regions
356871021729SAbdiel Janulgue *
356971021729SAbdiel Janulgue * The region info query enumerates all regions known to the driver by filling
357071021729SAbdiel Janulgue * in an array of struct drm_i915_memory_region_info structures.
357171021729SAbdiel Janulgue *
357271021729SAbdiel Janulgue * Example for getting the list of supported regions:
357371021729SAbdiel Janulgue *
357471021729SAbdiel Janulgue * .. code-block:: C
357571021729SAbdiel Janulgue *
357671021729SAbdiel Janulgue * struct drm_i915_query_memory_regions *info;
357771021729SAbdiel Janulgue * struct drm_i915_query_item item = {
357871021729SAbdiel Janulgue * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
357971021729SAbdiel Janulgue * };
358071021729SAbdiel Janulgue * struct drm_i915_query query = {
358171021729SAbdiel Janulgue * .num_items = 1,
358271021729SAbdiel Janulgue * .items_ptr = (uintptr_t)&item,
358371021729SAbdiel Janulgue * };
358471021729SAbdiel Janulgue * int err, i;
358571021729SAbdiel Janulgue *
358671021729SAbdiel Janulgue * // First query the size of the blob we need, this needs to be large
358771021729SAbdiel Janulgue * // enough to hold our array of regions. The kernel will fill out the
358871021729SAbdiel Janulgue * // item.length for us, which is the number of bytes we need.
358971021729SAbdiel Janulgue * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
359071021729SAbdiel Janulgue * if (err) ...
359171021729SAbdiel Janulgue *
359271021729SAbdiel Janulgue * info = calloc(1, item.length);
359371021729SAbdiel Janulgue * // Now that we allocated the required number of bytes, we call the ioctl
359471021729SAbdiel Janulgue * // again, this time with the data_ptr pointing to our newly allocated
359571021729SAbdiel Janulgue * // blob, which the kernel can then populate with all the region info.
359671021729SAbdiel Janulgue * item.data_ptr = (uintptr_t)info;
359771021729SAbdiel Janulgue *
359871021729SAbdiel Janulgue * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
359971021729SAbdiel Janulgue * if (err) ...
360071021729SAbdiel Janulgue *
360171021729SAbdiel Janulgue * // We can now access each region in the array
360271021729SAbdiel Janulgue * for (i = 0; i < info->num_regions; i++) {
360371021729SAbdiel Janulgue * struct drm_i915_memory_region_info mr = info->regions[i];
360471021729SAbdiel Janulgue * u16 class = mr.region.class;
360571021729SAbdiel Janulgue * u16 instance = mr.region.instance;
360671021729SAbdiel Janulgue *
360771021729SAbdiel Janulgue * ....
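 * // Fields such as mr.probed_size and mr.unallocated_size
 * // (documented above) describe the region's capacity.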
360871021729SAbdiel Janulgue * } 360971021729SAbdiel Janulgue * 361071021729SAbdiel Janulgue * free(info); 361171021729SAbdiel Janulgue */ 361271021729SAbdiel Janulgue struct drm_i915_query_memory_regions { 361371021729SAbdiel Janulgue /** @num_regions: Number of supported regions */ 361471021729SAbdiel Janulgue __u32 num_regions; 361571021729SAbdiel Janulgue 361671021729SAbdiel Janulgue /** @rsvd: MBZ */ 361771021729SAbdiel Janulgue __u32 rsvd[3]; 361871021729SAbdiel Janulgue 361971021729SAbdiel Janulgue /** @regions: Info about each supported region */ 362071021729SAbdiel Janulgue struct drm_i915_memory_region_info regions[]; 362171021729SAbdiel Janulgue }; 362271021729SAbdiel Janulgue 3623ebcb4029SMatthew Auld /** 3624b1123648STvrtko Ursulin * struct drm_i915_query_guc_submission_version - query GuC submission interface version 3625b1123648STvrtko Ursulin */ 3626b1123648STvrtko Ursulin struct drm_i915_query_guc_submission_version { 36275cf0fbf7STvrtko Ursulin /** @branch: Firmware branch version. */ 3628b1123648STvrtko Ursulin __u32 branch; 36295cf0fbf7STvrtko Ursulin /** @major: Firmware major version. */ 3630b1123648STvrtko Ursulin __u32 major; 36315cf0fbf7STvrtko Ursulin /** @minor: Firmware minor version. */ 3632b1123648STvrtko Ursulin __u32 minor; 36335cf0fbf7STvrtko Ursulin /** @patch: Firmware patch version. */ 3634b1123648STvrtko Ursulin __u32 patch; 3635b1123648STvrtko Ursulin }; 3636b1123648STvrtko Ursulin 3637b1123648STvrtko Ursulin /** 3638034d47b2STvrtko Ursulin * DOC: GuC HWCONFIG blob uAPI 3639034d47b2STvrtko Ursulin * 3640034d47b2STvrtko Ursulin * The GuC produces a blob with information about the current device. 3641034d47b2STvrtko Ursulin * i915 reads this blob from GuC and makes it available via this uAPI. 3642034d47b2STvrtko Ursulin * 3643034d47b2STvrtko Ursulin * The format and meaning of the blob content are documented in the 3644034d47b2STvrtko Ursulin * Programmer's Reference Manual. 3645034d47b2STvrtko Ursulin */ 3646034d47b2STvrtko Ursulin 3647034d47b2STvrtko Ursulin /** 3648ebcb4029SMatthew Auld * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added 3649ebcb4029SMatthew Auld * extension support using struct i915_user_extension. 3650ebcb4029SMatthew Auld * 3651525e93f6SMatthew Auld * Note that new buffer flags should be added here, at least for the stuff that 3652525e93f6SMatthew Auld * is immutable. Previously we would have two ioctls, one to create the object 3653525e93f6SMatthew Auld * with gem_create, and another to apply various parameters, however this 3654525e93f6SMatthew Auld * creates some ambiguity for the params which are considered immutable. Also in 3655525e93f6SMatthew Auld * general we're phasing out the various SET/GET ioctls. 3656ebcb4029SMatthew Auld */ 3657ebcb4029SMatthew Auld struct drm_i915_gem_create_ext { 3658ebcb4029SMatthew Auld /** 3659ebcb4029SMatthew Auld * @size: Requested size for the object. 3660ebcb4029SMatthew Auld * 3661ebcb4029SMatthew Auld * The (page-aligned) allocated size for the object will be returned. 3662ebcb4029SMatthew Auld * 36638133a6daSMatthew Auld * On platforms like DG2/ATS the kernel will always use 64K or larger 36648133a6daSMatthew Auld * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a 36658133a6daSMatthew Auld * minimum of 64K GTT alignment for such objects. 
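 *
 * As a sketch of the implied rounding (behaviour as described above;
 * exact sizes are device specific), requesting a 4K object backed only
 * by I915_MEMORY_CLASS_DEVICE on such a platform would come back with
 * @size rounded up:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create_ext create_ext = { .size = 4096, ... };
 *	// after a successful DRM_IOCTL_I915_GEM_CREATE_EXT here,
 *	// create_ext.size would be 64 * 1024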
3666caa574ffSMatthew Auld *
36678133a6daSMatthew Auld * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
36688133a6daSMatthew Auld * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
36698133a6daSMatthew Auld * where we had the following complications:
3670caa574ffSMatthew Auld *
3671caa574ffSMatthew Auld * 1) The entire PDE (which covers a 2MB virtual address range) must
3672caa574ffSMatthew Auld * contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
3673caa574ffSMatthew Auld * PDE is forbidden by the hardware.
3674caa574ffSMatthew Auld *
3675caa574ffSMatthew Auld * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
3676caa574ffSMatthew Auld * objects.
3677caa574ffSMatthew Auld *
36788133a6daSMatthew Auld * However on actual production HW this was completely changed to now
36798133a6daSMatthew Auld * allow setting a TLB hint at the PTE level (see PS64), which is a lot
36808133a6daSMatthew Auld * more flexible than the above. With this the 2M restriction was
36818133a6daSMatthew Auld * dropped where we now only require 64K.
3682ebcb4029SMatthew Auld */
3683ebcb4029SMatthew Auld __u64 size;
3684525e93f6SMatthew Auld 
3685ebcb4029SMatthew Auld /**
3686ebcb4029SMatthew Auld * @handle: Returned handle for the object.
3687ebcb4029SMatthew Auld *
3688ebcb4029SMatthew Auld * Object handles are nonzero.
3689ebcb4029SMatthew Auld */
3690ebcb4029SMatthew Auld __u32 handle;
3691525e93f6SMatthew Auld 
3692525e93f6SMatthew Auld /**
3693525e93f6SMatthew Auld * @flags: Optional flags.
3694525e93f6SMatthew Auld *
3695525e93f6SMatthew Auld * Supported values:
3696525e93f6SMatthew Auld *
3697525e93f6SMatthew Auld * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
3698525e93f6SMatthew Auld * the object will need to be accessed via the CPU.
3699525e93f6SMatthew Auld *
3700525e93f6SMatthew Auld * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
3701525e93f6SMatthew Auld * strictly required on configurations where some subset of the device
3702525e93f6SMatthew Auld * memory is directly visible/mappable through the CPU (which we also
3703525e93f6SMatthew Auld * call small BAR), like on some DG2+ systems. Note that this is quite
3704525e93f6SMatthew Auld * undesirable, but due to various factors like the client CPU, BIOS etc.
3705525e93f6SMatthew Auld * it's something we can expect to see in the wild. See
3706525e93f6SMatthew Auld * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
3707525e93f6SMatthew Auld * determine if this system applies.
3708525e93f6SMatthew Auld *
3709525e93f6SMatthew Auld * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
3710525e93f6SMatthew Auld * ensure the kernel can always spill the allocation to system memory,
3711525e93f6SMatthew Auld * if the object can't be allocated in the mappable part of
3712525e93f6SMatthew Auld * I915_MEMORY_CLASS_DEVICE.
3713525e93f6SMatthew Auld *
3714525e93f6SMatthew Auld * Also note that since the kernel only supports flat-CCS on objects
3715525e93f6SMatthew Auld * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
3716525e93f6SMatthew Auld * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
3717525e93f6SMatthew Auld * flat-CCS.
3718525e93f6SMatthew Auld *
3719525e93f6SMatthew Auld * Without this hint, the kernel will assume that non-mappable
3720525e93f6SMatthew Auld * I915_MEMORY_CLASS_DEVICE is preferred for this object.
Note that the 3721525e93f6SMatthew Auld * kernel can still migrate the object to the mappable part, as a last 3722525e93f6SMatthew Auld * resort, if userspace ever CPU faults this object, but this might be 3723525e93f6SMatthew Auld * expensive, and so ideally should be avoided. 3724525e93f6SMatthew Auld * 3725525e93f6SMatthew Auld * On older kernels which lack the relevant small-bar uAPI support (see 3726525e93f6SMatthew Auld * also &drm_i915_memory_region_info.probed_cpu_visible_size), 3727525e93f6SMatthew Auld * usage of the flag will result in an error, but it should NEVER be 3728525e93f6SMatthew Auld * possible to end up with a small BAR configuration, assuming we can 3729525e93f6SMatthew Auld * also successfully load the i915 kernel module. In such cases the 3730525e93f6SMatthew Auld * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as 3731525e93f6SMatthew Auld * such there are zero restrictions on where the object can be placed. 3732525e93f6SMatthew Auld */ 3733525e93f6SMatthew Auld #define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0) 3734ebcb4029SMatthew Auld __u32 flags; 3735525e93f6SMatthew Auld 3736ebcb4029SMatthew Auld /** 3737ebcb4029SMatthew Auld * @extensions: The chain of extensions to apply to this object. 3738ebcb4029SMatthew Auld * 3739ebcb4029SMatthew Auld * This will be useful in the future when we need to support several 3740ebcb4029SMatthew Auld * different extensions, and we need to apply more than one when 3741ebcb4029SMatthew Auld * creating the object. See struct i915_user_extension. 3742ebcb4029SMatthew Auld * 3743ebcb4029SMatthew Auld * If we don't supply any extensions then we get the same old gem_create 3744ebcb4029SMatthew Auld * behaviour. 3745ebcb4029SMatthew Auld * 37462459e56fSMatthew Auld * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see 37472459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions. 3748d3ac8d42SDaniele Ceraolo Spurio * 3749d3ac8d42SDaniele Ceraolo Spurio * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see 3750d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_protected_content. 375181b1b599SFei Yang * 375281b1b599SFei Yang * For I915_GEM_CREATE_EXT_SET_PAT usage see 375381b1b599SFei Yang * struct drm_i915_gem_create_ext_set_pat. 3754ebcb4029SMatthew Auld */ 37552459e56fSMatthew Auld #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 3756d3ac8d42SDaniele Ceraolo Spurio #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1 375781b1b599SFei Yang #define I915_GEM_CREATE_EXT_SET_PAT 2 3758ebcb4029SMatthew Auld __u64 extensions; 3759ebcb4029SMatthew Auld }; 3760ebcb4029SMatthew Auld 37612459e56fSMatthew Auld /** 37622459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions - The 37632459e56fSMatthew Auld * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension. 37642459e56fSMatthew Auld * 37652459e56fSMatthew Auld * Set the object with the desired set of placements/regions in priority 37662459e56fSMatthew Auld * order. Each entry must be unique and supported by the device. 37672459e56fSMatthew Auld * 37682459e56fSMatthew Auld * This is provided as an array of struct drm_i915_gem_memory_class_instance, or 37692459e56fSMatthew Auld * an equivalent layout of class:instance pair encodings. See struct 37702459e56fSMatthew Auld * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to 37712459e56fSMatthew Auld * query the supported regions for a device. 
37722459e56fSMatthew Auld *
37732459e56fSMatthew Auld * As an example, on discrete devices, if we wish to set the placement as
37742459e56fSMatthew Auld * device local-memory we can do something like:
37752459e56fSMatthew Auld *
37762459e56fSMatthew Auld * .. code-block:: C
37772459e56fSMatthew Auld *
37782459e56fSMatthew Auld * struct drm_i915_gem_memory_class_instance region_lmem = {
37792459e56fSMatthew Auld * .memory_class = I915_MEMORY_CLASS_DEVICE,
37802459e56fSMatthew Auld * .memory_instance = 0,
37812459e56fSMatthew Auld * };
37822459e56fSMatthew Auld * struct drm_i915_gem_create_ext_memory_regions regions = {
37832459e56fSMatthew Auld * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
37842459e56fSMatthew Auld * .regions = (uintptr_t)&region_lmem,
37852459e56fSMatthew Auld * .num_regions = 1,
37862459e56fSMatthew Auld * };
37872459e56fSMatthew Auld * struct drm_i915_gem_create_ext create_ext = {
37882459e56fSMatthew Auld * .size = 16 * PAGE_SIZE,
37892459e56fSMatthew Auld * .extensions = (uintptr_t)&regions,
37902459e56fSMatthew Auld * };
37912459e56fSMatthew Auld *
37922459e56fSMatthew Auld * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
37932459e56fSMatthew Auld * if (err) ...
37942459e56fSMatthew Auld *
37952459e56fSMatthew Auld * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
37962459e56fSMatthew Auld * along with the final object size in &drm_i915_gem_create_ext.size, which
37972459e56fSMatthew Auld * should account for any rounding up, if required.
3798a50794f2SRamalingam C *
3799a50794f2SRamalingam C * Note that userspace has no means of knowing the current backing region
3800a50794f2SRamalingam C * for objects where @num_regions is larger than one. The kernel will only
3801a50794f2SRamalingam C * ensure that the priority order of the @regions array is honoured, either
3802a50794f2SRamalingam C * when initially placing the object, or when moving memory around due to
3803a50794f2SRamalingam C * memory pressure.
3804a50794f2SRamalingam C *
3805a50794f2SRamalingam C * On Flat-CCS capable HW, compression is supported for the objects residing
3806a50794f2SRamalingam C * in I915_MEMORY_CLASS_DEVICE. When such objects (compressed) have another
3807a50794f2SRamalingam C * memory class in @regions and are migrated (by i915, due to memory
3808a50794f2SRamalingam C * constraints) to the non I915_MEMORY_CLASS_DEVICE region, then i915 needs to
3809a50794f2SRamalingam C * decompress the content. But i915 doesn't have the required information to
3810a50794f2SRamalingam C * decompress the userspace compressed objects.
3811a50794f2SRamalingam C *
3812a50794f2SRamalingam C * So i915 supports Flat-CCS only on objects which can reside solely in
3813a50794f2SRamalingam C * I915_MEMORY_CLASS_DEVICE regions.
38142459e56fSMatthew Auld */
38152459e56fSMatthew Auld struct drm_i915_gem_create_ext_memory_regions {
38162459e56fSMatthew Auld /** @base: Extension link. See struct i915_user_extension. */
38172459e56fSMatthew Auld struct i915_user_extension base;
38182459e56fSMatthew Auld 
38192459e56fSMatthew Auld /** @pad: MBZ */
38202459e56fSMatthew Auld __u32 pad;
38212459e56fSMatthew Auld /** @num_regions: Number of elements in the @regions array. */
38222459e56fSMatthew Auld __u32 num_regions;
38232459e56fSMatthew Auld /**
38242459e56fSMatthew Auld * @regions: The regions/placements array.
38252459e56fSMatthew Auld *
38262459e56fSMatthew Auld * An array of struct drm_i915_gem_memory_class_instance.
38272459e56fSMatthew Auld */
38282459e56fSMatthew Auld __u64 regions;
38292459e56fSMatthew Auld };
38302459e56fSMatthew Auld 
3831d3ac8d42SDaniele Ceraolo Spurio /**
3832d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_protected_content - The
3833d3ac8d42SDaniele Ceraolo Spurio * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
3834d3ac8d42SDaniele Ceraolo Spurio *
3835d3ac8d42SDaniele Ceraolo Spurio * If this extension is provided, buffer contents are expected to be protected
3836d3ac8d42SDaniele Ceraolo Spurio * by PXP encryption and require decryption for scan out and processing. This
3837d3ac8d42SDaniele Ceraolo Spurio * is only possible on platforms that have PXP enabled; in all other scenarios
3838d3ac8d42SDaniele Ceraolo Spurio * using this extension will cause the ioctl to fail and return -ENODEV. The
3839d3ac8d42SDaniele Ceraolo Spurio * flags parameter is reserved for future expansion and must currently be set
3840d3ac8d42SDaniele Ceraolo Spurio * to zero.
3841d3ac8d42SDaniele Ceraolo Spurio *
3842d3ac8d42SDaniele Ceraolo Spurio * The buffer contents are considered invalid after a PXP session teardown.
3843d3ac8d42SDaniele Ceraolo Spurio *
3844d3ac8d42SDaniele Ceraolo Spurio * The encryption is guaranteed to be processed correctly only if the object
3845d3ac8d42SDaniele Ceraolo Spurio * is submitted with a context created using the
3846d3ac8d42SDaniele Ceraolo Spurio * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
3847d3ac8d42SDaniele Ceraolo Spurio * at submission time on the validity of the objects involved.
3848d3ac8d42SDaniele Ceraolo Spurio *
3849d3ac8d42SDaniele Ceraolo Spurio * Below is an example of how to create a protected object:
3850d3ac8d42SDaniele Ceraolo Spurio *
3851d3ac8d42SDaniele Ceraolo Spurio * .. code-block:: C
3852d3ac8d42SDaniele Ceraolo Spurio *
3853d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext_protected_content protected_ext = {
3854d3ac8d42SDaniele Ceraolo Spurio * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
3855d3ac8d42SDaniele Ceraolo Spurio * .flags = 0,
3856d3ac8d42SDaniele Ceraolo Spurio * };
3857d3ac8d42SDaniele Ceraolo Spurio * struct drm_i915_gem_create_ext create_ext = {
3858d3ac8d42SDaniele Ceraolo Spurio * .size = PAGE_SIZE,
3859d3ac8d42SDaniele Ceraolo Spurio * .extensions = (uintptr_t)&protected_ext,
3860d3ac8d42SDaniele Ceraolo Spurio * };
3861d3ac8d42SDaniele Ceraolo Spurio *
3862d3ac8d42SDaniele Ceraolo Spurio * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
3863d3ac8d42SDaniele Ceraolo Spurio * if (err) ...
3864d3ac8d42SDaniele Ceraolo Spurio */
3865d3ac8d42SDaniele Ceraolo Spurio struct drm_i915_gem_create_ext_protected_content {
3866d3ac8d42SDaniele Ceraolo Spurio /** @base: Extension link. See struct i915_user_extension. */
3867d3ac8d42SDaniele Ceraolo Spurio struct i915_user_extension base;
3868d3ac8d42SDaniele Ceraolo Spurio /** @flags: reserved for future usage, currently MBZ */
3869d3ac8d42SDaniele Ceraolo Spurio __u32 flags;
3870d3ac8d42SDaniele Ceraolo Spurio };
3871d3ac8d42SDaniele Ceraolo Spurio 
387281b1b599SFei Yang /**
387381b1b599SFei Yang * struct drm_i915_gem_create_ext_set_pat - The
387481b1b599SFei Yang * I915_GEM_CREATE_EXT_SET_PAT extension.
387581b1b599SFei Yang *
387681b1b599SFei Yang * If this extension is provided, the specified caching policy (PAT index) is
387781b1b599SFei Yang * applied to the buffer object.
387881b1b599SFei Yang *
387981b1b599SFei Yang * Below is an example of how to create an object with a specific caching policy:
388081b1b599SFei Yang *
388181b1b599SFei Yang * .. code-block:: C
388281b1b599SFei Yang *
388381b1b599SFei Yang * struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
388481b1b599SFei Yang * .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
388581b1b599SFei Yang * .pat_index = 0,
388681b1b599SFei Yang * };
388781b1b599SFei Yang * struct drm_i915_gem_create_ext create_ext = {
388881b1b599SFei Yang * .size = PAGE_SIZE,
388981b1b599SFei Yang * .extensions = (uintptr_t)&set_pat_ext,
389081b1b599SFei Yang * };
389181b1b599SFei Yang *
389281b1b599SFei Yang * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
389381b1b599SFei Yang * if (err) ...
389481b1b599SFei Yang */
389581b1b599SFei Yang struct drm_i915_gem_create_ext_set_pat {
389681b1b599SFei Yang /** @base: Extension link. See struct i915_user_extension. */
389781b1b599SFei Yang struct i915_user_extension base;
389881b1b599SFei Yang /**
389981b1b599SFei Yang * @pat_index: PAT index to be set.
390081b1b599SFei Yang * The PAT index is a bit field in the Page Table Entry that controls
390181b1b599SFei Yang * caching behavior for GPU accesses. The definition of PAT index is
390281b1b599SFei Yang * platform dependent and can be found in hardware specifications.
390381b1b599SFei Yang */
390481b1b599SFei Yang __u32 pat_index;
390581b1b599SFei Yang /** @rsvd: reserved for future use */
390681b1b599SFei Yang __u32 rsvd;
390781b1b599SFei Yang };
390881b1b599SFei Yang 
3909cbbd3764SHuang, Sean Z /* ID of the protected content session managed by i915 when PXP is active */
3910cbbd3764SHuang, Sean Z #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
3911cbbd3764SHuang, Sean Z 
3912b1c1f5c4SEmil Velikov #if defined(__cplusplus)
3913b1c1f5c4SEmil Velikov }
3914b1c1f5c4SEmil Velikov #endif
3915b1c1f5c4SEmil Velikov 
3916718dceddSDavid Howells #endif /* _UAPI_I915_DRM_H_ */
3917