xref: /linux/include/uapi/drm/xe_drm.h (revision caaed1dda7df9b4e21d439bb5e7750d4af4f1e78)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #ifndef _UAPI_XE_DRM_H_
7 #define _UAPI_XE_DRM_H_
8 
9 #include "drm.h"
10 
11 #if defined(__cplusplus)
12 extern "C" {
13 #endif
14 
15 /*
16  * Please note that modifications to all structs defined here are
17  * subject to backwards-compatibility constraints.
18  * Sections in this file are organized as follows:
19  *   1. IOCTL definition
20  *   2. Extension definition and helper structs
21  *   3. IOCTL's Query structs in the order of the Query's entries.
22  *   4. The rest of IOCTL structs in the order of IOCTL declaration.
23  */
24 
25 /**
26  * DOC: Xe Device Block Diagram
27  *
28  * The diagram below represents a high-level simplification of a discrete
29  * GPU supported by the Xe driver. It shows some device components which
30  * are necessary to understand this API, as well as how their relations
31  * to each other. This diagram does not represent real hardware::
32  *
33  *   ┌──────────────────────────────────────────────────────────────────┐
34  *   │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
35  *   │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
36  *   │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
37  *   │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
38  *   │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
39  *   │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
40  *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
41  *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
42  *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
43  *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
44  *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
45  *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
46  *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
47  *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
48  *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
49  *   │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
50  *   │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
51  *   │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
52  *   │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
53  *   │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
54  *   │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
55  *   │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
56  *   │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
57  *   │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
58  *   │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
59  *   └─────────────────────────────Device0───────┬──────────────────────┘
60  *                                               │
61  *                        ───────────────────────┴────────── PCI bus
62  */
63 
64 /**
65  * DOC: Xe uAPI Overview
66  *
67  * This section aims to describe the Xe's IOCTL entries, its structs, and other
68  * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
69  * entries and usage.
70  *
71  * List of supported IOCTLs:
72  *  - &DRM_IOCTL_XE_DEVICE_QUERY
73  *  - &DRM_IOCTL_XE_GEM_CREATE
74  *  - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
75  *  - &DRM_IOCTL_XE_VM_CREATE
76  *  - &DRM_IOCTL_XE_VM_DESTROY
77  *  - &DRM_IOCTL_XE_VM_BIND
78  *  - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
79  *  - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
80  *  - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
81  *  - &DRM_IOCTL_XE_EXEC
82  *  - &DRM_IOCTL_XE_WAIT_USER_FENCE
83  *  - &DRM_IOCTL_XE_OBSERVATION
84  *  - &DRM_IOCTL_XE_MADVISE
85  *  - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
86  */
87 
88 /*
89  * xe specific ioctls.
90  *
91  * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
92  * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
93  * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
94  */
95 #define DRM_XE_DEVICE_QUERY		0x00
96 #define DRM_XE_GEM_CREATE		0x01
97 #define DRM_XE_GEM_MMAP_OFFSET		0x02
98 #define DRM_XE_VM_CREATE		0x03
99 #define DRM_XE_VM_DESTROY		0x04
100 #define DRM_XE_VM_BIND			0x05
101 #define DRM_XE_EXEC_QUEUE_CREATE	0x06
102 #define DRM_XE_EXEC_QUEUE_DESTROY	0x07
103 #define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
104 #define DRM_XE_EXEC			0x09
105 #define DRM_XE_WAIT_USER_FENCE		0x0a
106 #define DRM_XE_OBSERVATION		0x0b
107 #define DRM_XE_MADVISE			0x0c
108 #define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS	0x0d
109 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY	0x0e
110 
111 /* Must be kept compact -- no holes */
112 
113 #define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
114 #define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
115 #define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
116 #define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
117 #define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
118 #define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
119 #define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
120 #define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
121 #define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
122 #define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
123 #define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
124 #define DRM_IOCTL_XE_OBSERVATION		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
125 #define DRM_IOCTL_XE_MADVISE			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
126 #define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
127 #define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY	DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
128 
129 /**
130  * DOC: Xe IOCTL Extensions
131  *
132  * Before detailing the IOCTLs and its structs, it is important to highlight
133  * that every IOCTL in Xe is extensible.
134  *
135  * Many interfaces need to grow over time. In most cases we can simply
136  * extend the struct and have userspace pass in more data. Another option,
137  * as demonstrated by Vulkan's approach to providing extensions for forward
138  * and backward compatibility, is to use a list of optional structs to
139  * provide those extra details.
140  *
141  * The key advantage to using an extension chain is that it allows us to
142  * redefine the interface more easily than an ever growing struct of
143  * increasing complexity, and for large parts of that interface to be
144  * entirely optional. The downside is more pointer chasing; chasing across
145  * the __user boundary with pointers encapsulated inside u64.
146  *
147  * Example chaining:
148  *
149  * .. code-block:: C
150  *
151  *	struct drm_xe_user_extension ext3 {
152  *		.next_extension = 0, // end
153  *		.name = ...,
154  *	};
155  *	struct drm_xe_user_extension ext2 {
156  *		.next_extension = (uintptr_t)&ext3,
157  *		.name = ...,
158  *	};
159  *	struct drm_xe_user_extension ext1 {
160  *		.next_extension = (uintptr_t)&ext2,
161  *		.name = ...,
162  *	};
163  *
164  * Typically the struct drm_xe_user_extension would be embedded in some uAPI
165  * struct, and in this case we would feed it the head of the chain(i.e ext1),
166  * which would then apply all of the above extensions.
167 */
168 
169 /**
170  * struct drm_xe_user_extension - Base class for defining a chain of extensions
171  */
172 struct drm_xe_user_extension {
173 	/**
174 	 * @next_extension:
175 	 *
176 	 * Pointer to the next struct drm_xe_user_extension, or zero if the end.
177 	 */
178 	__u64 next_extension;
179 
180 	/**
181 	 * @name: Name of the extension.
182 	 *
183 	 * Note that the name here is just some integer.
184 	 *
185 	 * Also note that the name space for this is not global for the whole
186 	 * driver, but rather its scope/meaning is limited to the specific piece
187 	 * of uAPI which has embedded the struct drm_xe_user_extension.
188 	 */
189 	__u32 name;
190 
191 	/**
192 	 * @pad: MBZ
193 	 *
194 	 * All undefined bits must be zero.
195 	 */
196 	__u32 pad;
197 };
198 
199 /**
200  * struct drm_xe_ext_set_property - Generic set property extension
201  *
202  * A generic struct that allows any of the Xe's IOCTL to be extended
203  * with a set_property operation.
204  */
205 struct drm_xe_ext_set_property {
206 	/** @base: base user extension */
207 	struct drm_xe_user_extension base;
208 
209 	/** @property: property to set */
210 	__u32 property;
211 
212 	/** @pad: MBZ */
213 	__u32 pad;
214 
215 	union {
216 		/** @value: property value */
217 		__u64 value;
218 		/** @ptr: pointer to user value */
219 		__u64 ptr;
220 	};
221 
222 	/** @reserved: Reserved */
223 	__u64 reserved[2];
224 };
225 
226 /**
227  * struct drm_xe_engine_class_instance - instance of an engine class
228  *
229  * It is returned as part of the @drm_xe_engine, but it also is used as
230  * the input of engine selection for both @drm_xe_exec_queue_create and
231  * @drm_xe_query_engine_cycles
232  *
233  * The @engine_class can be:
234  *  - %DRM_XE_ENGINE_CLASS_RENDER
235  *  - %DRM_XE_ENGINE_CLASS_COPY
236  *  - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
237  *  - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
238  *  - %DRM_XE_ENGINE_CLASS_COMPUTE
239  *  - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
240  *    hardware engine class). Used for creating ordered queues of VM
241  *    bind operations.
242  */
243 struct drm_xe_engine_class_instance {
244 #define DRM_XE_ENGINE_CLASS_RENDER		0
245 #define DRM_XE_ENGINE_CLASS_COPY		1
246 #define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
247 #define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
248 #define DRM_XE_ENGINE_CLASS_COMPUTE		4
249 #define DRM_XE_ENGINE_CLASS_VM_BIND		5
250 	/** @engine_class: engine class id */
251 	__u16 engine_class;
252 	/** @engine_instance: engine instance id */
253 	__u16 engine_instance;
254 	/** @gt_id: Unique ID of this GT within the PCI Device */
255 	__u16 gt_id;
256 	/** @pad: MBZ */
257 	__u16 pad;
258 };
259 
260 /**
261  * struct drm_xe_engine - describe hardware engine
262  */
263 struct drm_xe_engine {
264 	/** @instance: The @drm_xe_engine_class_instance */
265 	struct drm_xe_engine_class_instance instance;
266 
267 	/** @reserved: Reserved */
268 	__u64 reserved[3];
269 };
270 
271 /**
272  * struct drm_xe_query_engines - describe engines
273  *
274  * If a query is made with a struct @drm_xe_device_query where .query
275  * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
276  * struct @drm_xe_query_engines in .data.
277  */
278 struct drm_xe_query_engines {
279 	/** @num_engines: number of engines returned in @engines */
280 	__u32 num_engines;
281 	/** @pad: MBZ */
282 	__u32 pad;
283 	/** @engines: The returned engines for this device */
284 	struct drm_xe_engine engines[];
285 };
286 
287 /**
288  * enum drm_xe_memory_class - Supported memory classes.
289  */
290 enum drm_xe_memory_class {
291 	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
292 	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
293 	/**
294 	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
295 	 * represents the memory that is local to the device, which we
296 	 * call VRAM. Not valid on integrated platforms.
297 	 */
298 	DRM_XE_MEM_REGION_CLASS_VRAM
299 };
300 
301 /**
302  * struct drm_xe_mem_region - Describes some region as known to
303  * the driver.
304  */
305 struct drm_xe_mem_region {
306 	/**
307 	 * @mem_class: The memory class describing this region.
308 	 *
309 	 * See enum drm_xe_memory_class for supported values.
310 	 */
311 	__u16 mem_class;
312 	/**
313 	 * @instance: The unique ID for this region, which serves as the
314 	 * index in the placement bitmask used as argument for
315 	 * &DRM_IOCTL_XE_GEM_CREATE
316 	 */
317 	__u16 instance;
318 	/**
319 	 * @min_page_size: Min page-size in bytes for this region.
320 	 *
321 	 * When the kernel allocates memory for this region, the
322 	 * underlying pages will be at least @min_page_size in size.
323 	 * Buffer objects with an allowable placement in this region must be
324 	 * created with a size aligned to this value.
325 	 * GPU virtual address mappings of (parts of) buffer objects that
326 	 * may be placed in this region must also have their GPU virtual
327 	 * address and range aligned to this value.
328 	 * Affected IOCTLS will return %-EINVAL if alignment restrictions are
329 	 * not met.
330 	 */
331 	__u32 min_page_size;
332 	/**
333 	 * @total_size: The usable size in bytes for this region.
334 	 */
335 	__u64 total_size;
336 	/**
337 	 * @used: Estimate of the memory used in bytes for this region.
338 	 *
339 	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
340 	 * accounting.  Without this the value here will always equal
341 	 * zero.
342 	 */
343 	__u64 used;
344 	/**
345 	 * @cpu_visible_size: How much of this region can be CPU
346 	 * accessed, in bytes.
347 	 *
348 	 * This will always be <= @total_size, and the remainder (if
349 	 * any) will not be CPU accessible. If the CPU accessible part
350 	 * is smaller than @total_size then this is referred to as a
351 	 * small BAR system.
352 	 *
353 	 * On systems without small BAR (full BAR), the probed_size will
354 	 * always equal the @total_size, since all of it will be CPU
355 	 * accessible.
356 	 *
357 	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
358 	 * regions (for other types the value here will always equal
359 	 * zero).
360 	 */
361 	__u64 cpu_visible_size;
362 	/**
363 	 * @cpu_visible_used: Estimate of CPU visible memory used, in
364 	 * bytes.
365 	 *
366 	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
367 	 * accounting. Without this the value here will always equal
368 	 * zero.  Note this is only currently tracked for
369 	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
370 	 * here will always be zero).
371 	 */
372 	__u64 cpu_visible_used;
373 	/** @reserved: Reserved */
374 	__u64 reserved[6];
375 };
376 
377 /**
378  * struct drm_xe_query_mem_regions - describe memory regions
379  *
380  * If a query is made with a struct drm_xe_device_query where .query
381  * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
382  * struct drm_xe_query_mem_regions in .data.
383  */
384 struct drm_xe_query_mem_regions {
385 	/** @num_mem_regions: number of memory regions returned in @mem_regions */
386 	__u32 num_mem_regions;
387 	/** @pad: MBZ */
388 	__u32 pad;
389 	/** @mem_regions: The returned memory regions for this device */
390 	struct drm_xe_mem_region mem_regions[];
391 };
392 
393 /**
394  * struct drm_xe_query_config - describe the device configuration
395  *
396  * If a query is made with a struct drm_xe_device_query where .query
397  * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
398  * struct drm_xe_query_config in .data.
399  *
400  * The index in @info can be:
401  *  - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
402  *    and the device revision (next 8 bits)
403  *  - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
404  *    configuration, see list below
405  *
406  *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
407  *      has usable VRAM
408  *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
409  *      has low latency hint support
410  *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
411  *      device has CPU address mirroring support
412  *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT - Flag is set if the
413  *      device supports the userspace hint %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION.
414  *      This is exposed only on Xe2+.
415  *  - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
416  *    required by this device, typically SZ_4K or SZ_64K
417  *  - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
418  *  - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
419  *    available exec queue priority
420  */
421 struct drm_xe_query_config {
422 	/** @num_params: number of parameters returned in info */
423 	__u32 num_params;
424 
425 	/** @pad: MBZ */
426 	__u32 pad;
427 
428 #define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
429 #define DRM_XE_QUERY_CONFIG_FLAGS			1
430 	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM	(1 << 0)
431 	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY	(1 << 1)
432 	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR	(1 << 2)
433 	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT (1 << 3)
434 #define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
435 #define DRM_XE_QUERY_CONFIG_VA_BITS			3
436 #define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
437 	/** @info: array of elements containing the config info */
438 	__u64 info[];
439 };
440 
441 /**
442  * struct drm_xe_gt - describe an individual GT.
443  *
444  * To be used with drm_xe_query_gt_list, which will return a list with all the
445  * existing GT individual descriptions.
446  * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
447  * implementing graphics and/or media operations.
448  *
449  * The index in @type can be:
450  *  - %DRM_XE_QUERY_GT_TYPE_MAIN
451  *  - %DRM_XE_QUERY_GT_TYPE_MEDIA
452  */
453 struct drm_xe_gt {
454 #define DRM_XE_QUERY_GT_TYPE_MAIN		0
455 #define DRM_XE_QUERY_GT_TYPE_MEDIA		1
456 	/** @type: GT type: Main or Media */
457 	__u16 type;
458 	/** @tile_id: Tile ID where this GT lives (Information only) */
459 	__u16 tile_id;
460 	/** @gt_id: Unique ID of this GT within the PCI Device */
461 	__u16 gt_id;
462 	/** @pad: MBZ */
463 	__u16 pad[3];
464 	/** @reference_clock: A clock frequency for timestamp */
465 	__u32 reference_clock;
466 	/**
467 	 * @near_mem_regions: Bit mask of instances from
468 	 * drm_xe_query_mem_regions that are nearest to the current engines
469 	 * of this GT.
470 	 * Each index in this mask refers directly to the struct
471 	 * drm_xe_query_mem_regions' instance, no assumptions should
472 	 * be made about order. The type of each region is described
473 	 * by struct drm_xe_query_mem_regions' mem_class.
474 	 */
475 	__u64 near_mem_regions;
476 	/**
477 	 * @far_mem_regions: Bit mask of instances from
478 	 * drm_xe_query_mem_regions that are far from the engines of this GT.
479 	 * In general, they have extra indirections when compared to the
480 	 * @near_mem_regions. For a discrete device this could mean system
481 	 * memory and memory living in a different tile.
482 	 * Each index in this mask refers directly to the struct
483 	 * drm_xe_query_mem_regions' instance, no assumptions should
484 	 * be made about order. The type of each region is described
485 	 * by struct drm_xe_query_mem_regions' mem_class.
486 	 */
487 	__u64 far_mem_regions;
488 	/** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */
489 	__u16 ip_ver_major;
490 	/** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */
491 	__u16 ip_ver_minor;
492 	/** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */
493 	__u16 ip_ver_rev;
494 	/** @pad2: MBZ */
495 	__u16 pad2;
496 	/** @reserved: Reserved */
497 	__u64 reserved[7];
498 };
499 
500 /**
501  * struct drm_xe_query_gt_list - A list with GT description items.
502  *
503  * If a query is made with a struct drm_xe_device_query where .query
504  * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
505  * drm_xe_query_gt_list in .data.
506  */
507 struct drm_xe_query_gt_list {
508 	/** @num_gt: number of GT items returned in gt_list */
509 	__u32 num_gt;
510 	/** @pad: MBZ */
511 	__u32 pad;
512 	/** @gt_list: The GT list returned for this device */
513 	struct drm_xe_gt gt_list[];
514 };
515 
516 /**
517  * struct drm_xe_query_topology_mask - describe the topology mask of a GT
518  *
519  * This is the hardware topology which reflects the internal physical
520  * structure of the GPU.
521  *
522  * If a query is made with a struct drm_xe_device_query where .query
523  * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
524  * struct drm_xe_query_topology_mask in .data.
525  *
526  * The @type can be:
527  *  - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
528  *    (DSS) available for geometry operations. For example a query response
529  *    containing the following in mask:
530  *    ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
531  *    means 32 DSS are available for geometry.
532  *  - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
533  *    (DSS) available for compute operations. For example a query response
534  *    containing the following in mask:
535  *    ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
536  *    means 32 DSS are available for compute.
537  *  - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks.  This type
538  *    may be omitted if the driver is unable to query the mask from the
539  *    hardware.
540  *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
541  *    available per Dual Sub Slices (DSS). For example a query response
542  *    containing the following in mask:
543  *    ``EU_PER_DSS    ff ff 00 00 00 00 00 00``
544  *    means each DSS has 16 SIMD8 EUs. This type may be omitted if device
545  *    doesn't have SIMD8 EUs.
546  *  - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
547  *    Units (EU) available per Dual Sub Slices (DSS). For example a query
548  *    response containing the following in mask:
549  *    ``SIMD16_EU_PER_DSS    ff ff 00 00 00 00 00 00``
550  *    means each DSS has 16 SIMD16 EUs. This type may be omitted if device
551  *    doesn't have SIMD16 EUs.
552  */
553 struct drm_xe_query_topology_mask {
554 	/** @gt_id: GT ID the mask is associated with */
555 	__u16 gt_id;
556 
557 #define DRM_XE_TOPO_DSS_GEOMETRY	1
558 #define DRM_XE_TOPO_DSS_COMPUTE		2
559 #define DRM_XE_TOPO_L3_BANK		3
560 #define DRM_XE_TOPO_EU_PER_DSS		4
561 #define DRM_XE_TOPO_SIMD16_EU_PER_DSS	5
562 	/** @type: type of mask */
563 	__u16 type;
564 
565 	/** @num_bytes: number of bytes in requested mask */
566 	__u32 num_bytes;
567 
568 	/** @mask: little-endian mask of @num_bytes */
569 	__u8 mask[];
570 };
571 
572 /**
573  * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
574  *
575  * If a query is made with a struct drm_xe_device_query where .query is equal to
576  * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
577  * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
578  * .data points to this allocated structure.
579  *
580  * The query returns the engine cycles, which along with GT's @reference_clock,
581  * can be used to calculate the engine timestamp. In addition the
582  * query returns a set of cpu timestamps that indicate when the command
583  * streamer cycle count was captured.
584  */
585 struct drm_xe_query_engine_cycles {
586 	/**
587 	 * @eci: This is input by the user and is the engine for which command
588 	 * streamer cycles is queried.
589 	 */
590 	struct drm_xe_engine_class_instance eci;
591 
592 	/**
593 	 * @clockid: This is input by the user and is the reference clock id for
594 	 * CPU timestamp. For definition, see clock_gettime(2) and
595 	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
596 	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
597 	 */
598 	__s32 clockid;
599 
600 	/** @width: Width of the engine cycle counter in bits. */
601 	__u32 width;
602 
603 	/**
604 	 * @engine_cycles: Engine cycles as read from its register
605 	 * at 0x358 offset.
606 	 */
607 	__u64 engine_cycles;
608 
609 	/**
610 	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
611 	 * reading the engine_cycles register using the reference clockid set by the
612 	 * user.
613 	 */
614 	__u64 cpu_timestamp;
615 
616 	/**
617 	 * @cpu_delta: Time delta in ns captured around reading the lower dword
618 	 * of the engine_cycles register.
619 	 */
620 	__u64 cpu_delta;
621 };
622 
623 /**
624  * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
625  *
626  * Given a uc_type this will return the branch, major, minor and patch version
627  * of the micro-controller firmware.
628  */
629 struct drm_xe_query_uc_fw_version {
630 	/** @uc_type: The micro-controller type to query firmware version */
631 #define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0
632 #define XE_QUERY_UC_TYPE_HUC 1
633 	__u16 uc_type;
634 
635 	/** @pad: MBZ */
636 	__u16 pad;
637 
638 	/** @branch_ver: branch uc fw version */
639 	__u32 branch_ver;
640 	/** @major_ver: major uc fw version */
641 	__u32 major_ver;
642 	/** @minor_ver: minor uc fw version */
643 	__u32 minor_ver;
644 	/** @patch_ver: patch uc fw version */
645 	__u32 patch_ver;
646 
647 	/** @pad2: MBZ */
648 	__u32 pad2;
649 
650 	/** @reserved: Reserved */
651 	__u64 reserved;
652 };
653 
654 /**
655  * struct drm_xe_query_pxp_status - query if PXP is ready
656  *
657  * If PXP is enabled and no fatal error has occurred, the status will be set to
658  * one of the following values:
659  * 0: PXP init still in progress
660  * 1: PXP init complete
661  *
662  * If PXP is not enabled or something has gone wrong, the query will be failed
663  * with one of the following error codes:
664  * -ENODEV: PXP not supported or disabled;
665  * -EIO: fatal error occurred during init, so PXP will never be enabled;
666  * -EINVAL: incorrect value provided as part of the query;
667  * -EFAULT: error copying the memory between kernel and userspace.
668  *
669  * The status can only be 0 in the first few seconds after driver load. If
670  * everything works as expected, the status will transition to init complete in
671  * less than 1 second, while in case of errors the driver might take longer to
672  * start returning an error code, but it should still take less than 10 seconds.
673  *
674  * The supported session type bitmask is based on the values in
675  * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
676  * is not reported in the bitmask.
677  *
678  */
679 struct drm_xe_query_pxp_status {
680 	/** @status: current PXP status */
681 	__u32 status;
682 
683 	/** @supported_session_types: bitmask of supported PXP session types */
684 	__u32 supported_session_types;
685 };
686 
687 /**
688  * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
689  * structure to query device information
690  *
691  * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
692  * and sets the value in the query member. This determines the type of
693  * the structure provided by the driver in data, among struct drm_xe_query_*.
694  *
695  * The @query can be:
696  *  - %DRM_XE_DEVICE_QUERY_ENGINES
697  *  - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
698  *  - %DRM_XE_DEVICE_QUERY_CONFIG
699  *  - %DRM_XE_DEVICE_QUERY_GT_LIST
700  *  - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
701  *    configuration of the device such as information on slices, memory,
702  *    caches, and so on. It is provided as a table of key / value
703  *    attributes.
704  *  - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
705  *  - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
706  *  - %DRM_XE_DEVICE_QUERY_PXP_STATUS
707  *
708  * If size is set to 0, the driver fills it with the required size for
709  * the requested type of data to query. If size is equal to the required
710  * size, the queried information is copied into data. If size is set to
711  * a value different from 0 and different from the required size, the
712  * IOCTL call returns -EINVAL.
713  *
714  * For example the following code snippet allows retrieving and printing
715  * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
716  *
717  * .. code-block:: C
718  *
719  *     struct drm_xe_query_engines *engines;
720  *     struct drm_xe_device_query query = {
721  *         .extensions = 0,
722  *         .query = DRM_XE_DEVICE_QUERY_ENGINES,
723  *         .size = 0,
724  *         .data = 0,
725  *     };
726  *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
727  *     engines = malloc(query.size);
728  *     query.data = (uintptr_t)engines;
729  *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
730  *     for (int i = 0; i < engines->num_engines; i++) {
731  *         printf("Engine %d: %s\n", i,
732  *             engines->engines[i].instance.engine_class ==
733  *                 DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
734  *             engines->engines[i].instance.engine_class ==
735  *                 DRM_XE_ENGINE_CLASS_COPY ? "COPY":
736  *             engines->engines[i].instance.engine_class ==
737  *                 DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
738  *             engines->engines[i].instance.engine_class ==
739  *                 DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
740  *             engines->engines[i].instance.engine_class ==
741  *                 DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
742  *             "UNKNOWN");
743  *     }
744  *     free(engines);
745  */
746 struct drm_xe_device_query {
747 	/** @extensions: Pointer to the first extension struct, if any */
748 	__u64 extensions;
749 
750 #define DRM_XE_DEVICE_QUERY_ENGINES		0
751 #define DRM_XE_DEVICE_QUERY_MEM_REGIONS		1
752 #define DRM_XE_DEVICE_QUERY_CONFIG		2
753 #define DRM_XE_DEVICE_QUERY_GT_LIST		3
754 #define DRM_XE_DEVICE_QUERY_HWCONFIG		4
755 #define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
756 #define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
757 #define DRM_XE_DEVICE_QUERY_UC_FW_VERSION	7
758 #define DRM_XE_DEVICE_QUERY_OA_UNITS		8
759 #define DRM_XE_DEVICE_QUERY_PXP_STATUS		9
760 #define DRM_XE_DEVICE_QUERY_EU_STALL		10
761 	/** @query: The type of data to query */
762 	__u32 query;
763 
764 	/** @size: Size of the queried data */
765 	__u32 size;
766 
767 	/** @data: Queried data is placed here */
768 	__u64 data;
769 
770 	/** @reserved: Reserved */
771 	__u64 reserved[2];
772 };
773 
774 /**
775  * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
776  * gem creation
777  *
778  * The @flags can be:
779  *  - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
780  *    allocation strategy by deferring physical memory allocation
781  *    until the object is either bound to a virtual memory region via
782  *    VM_BIND or accessed by the CPU. As a result, no backing memory is
783  *    reserved at the time of GEM object creation.
784  *  - %DRM_XE_GEM_CREATE_FLAG_SCANOUT - Indicates that the GEM object is
785  *    intended for scanout via the display engine. When set, kernel ensures
786  *    that the allocation is placed in a memory region compatible with the
787  *    display engine requirements. This may impose restrictions on tiling,
788  *    alignment, and memory placement to guarantee proper display functionality.
789  *  - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
790  *    possible placement, ensure that the corresponding VRAM allocation
791  *    will always use the CPU accessible part of VRAM. This is important
792  *    for small-bar systems (on full-bar systems this gets turned into a
793  *    noop).
794  *    Note1: System memory can be used as an extra placement if the kernel
795  *    should spill the allocation to system memory, if space can't be made
796  *    available in the CPU accessible part of VRAM (giving the same
797  *    behaviour as the i915 interface, see
798  *    I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
799  *    Note2: For clear-color CCS surfaces the kernel needs to read the
800  *    clear-color value stored in the buffer, and on discrete platforms we
801  *    need to use VRAM for display surfaces, therefore the kernel requires
802  *    setting this flag for such objects, otherwise an error is thrown on
803  *    small-bar systems.
804  *  - %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION - Allows userspace to
805  *    hint that compression (CCS) should be disabled for the buffer being
806  *    created. This can avoid unnecessary memory operations and CCS state
807  *    management.
808  *    On pre-Xe2 platforms, this flag is currently rejected as compression
809  *    control is not supported via PAT index. On Xe2+ platforms, compression
810  *    is controlled via PAT entries. If this flag is set, the driver will reject
811  *    any VM bind that requests a PAT index enabling compression for this BO.
812  *    Note: On dGPU platforms, there is currently no change in behavior with
813  *    this flag, but future improvements may leverage it. The current benefit is
814  *    primarily applicable to iGPU platforms.
815  *
816  * @cpu_caching supports the following values:
817  *  - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
818  *    caching. On iGPU this can't be used for scanout surfaces. Currently
819  *    not allowed for objects placed in VRAM.
820  *  - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
821  *    is uncached. Scanout surfaces should likely use this. All objects
822  *    that can be placed in VRAM must use this.
823  *
824  * This ioctl supports setting the following properties via the
825  * %DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY extension, which uses the
826  * generic @drm_xe_ext_set_property struct:
827  *
828  *  - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
829  *    this object will be used with. Valid values are listed in enum
830  *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
831  *    there is no need to explicitly set that. Objects used with session of type
832  *    %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
833  *    event occurs after their creation. Attempting to flip an invalid object
834  *    will cause a black frame to be displayed instead. Submissions with invalid
835  *    objects mapped in the VM will be rejected.
836  */
837 struct drm_xe_gem_create {
838 #define DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY	0
839 #define   DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE	0
840 	/** @extensions: Pointer to the first extension struct, if any */
841 	__u64 extensions;
842 
843 	/**
844 	 * @size: Size of the object to be created, must match region
845 	 * (system or vram) minimum alignment (&min_page_size).
846 	 */
847 	__u64 size;
848 
849 	/**
850 	 * @placement: A mask of memory instances of where BO can be placed.
851 	 * Each index in this mask refers directly to the struct
852 	 * drm_xe_query_mem_regions' instance, no assumptions should
853 	 * be made about order. The type of each region is described
854 	 * by struct drm_xe_query_mem_regions' mem_class.
855 	 */
856 	__u32 placement;
857 
858 #define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(1 << 0)
859 #define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(1 << 1)
860 #define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(1 << 2)
861 #define DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION		(1 << 3)
862 	/**
863 	 * @flags: Flags, currently a mask of memory instances of where BO can
864 	 * be placed
865 	 */
866 	__u32 flags;
867 
868 	/**
869 	 * @vm_id: Attached VM, if any
870 	 *
871 	 * If a VM is specified, this BO must:
872 	 *
873 	 *  1. Only ever be bound to that VM.
874 	 *  2. Cannot be exported as a PRIME fd.
875 	 */
876 	__u32 vm_id;
877 
878 	/**
879 	 * @handle: Returned handle for the object.
880 	 *
881 	 * Object handles are nonzero.
882 	 */
883 	__u32 handle;
884 
885 #define DRM_XE_GEM_CPU_CACHING_WB                      1
886 #define DRM_XE_GEM_CPU_CACHING_WC                      2
887 	/**
888 	 * @cpu_caching: The CPU caching mode to select for this object. If
889 	 * mmaping the object the mode selected here will also be used. The
890 	 * exception is when mapping system memory (including data evicted
891 	 * to system) on discrete GPUs. The caching mode selected will
892 	 * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency
893 	 * between GPU- and CPU is guaranteed. The caching mode of
894 	 * existing CPU-mappings will be updated transparently to
895 	 * user-space clients.
896 	 */
897 	__u16 cpu_caching;
898 	/** @pad: MBZ */
899 	__u16 pad[3];
900 
901 	/** @reserved: Reserved */
902 	__u64 reserved[2];
903 };
904 
905 /**
906  * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
907  *
908  * The @flags can be:
909  *  - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset
910  *    for use in mmap ioctl. Writing to the returned mmap address will generate a
911  *    PCI memory barrier with low overhead (avoiding IOCTL call as well as writing
912  *    to VRAM which would also add overhead), acting like an MI_MEM_FENCE
913  *    instruction.
914  *
915  * Note: The mmap size can be at most 4K, due to HW limitations. As a result
916  * this interface is only supported on CPU architectures that support 4K page
917  * size. The mmap_offset ioctl will detect this and gracefully return an
918  * error, where userspace is expected to have a different fallback method for
919  * triggering a barrier.
920  *
921  * Roughly the usage would be as follows:
922  *
923  * .. code-block:: C
924  *
925  *     struct drm_xe_gem_mmap_offset mmo = {
926  *         .handle = 0, // must be set to 0
927  *         .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
928  *     };
929  *
930  *     err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
931  *     map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
932  *     map[i] = 0xdeadbeaf; // issue barrier
933  */
934 struct drm_xe_gem_mmap_offset {
935 	/** @extensions: Pointer to the first extension struct, if any */
936 	__u64 extensions;
937 
938 	/** @handle: Handle for the object being mapped. */
939 	__u32 handle;
940 
941 #define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER     (1 << 0)
942 	/** @flags: Flags */
943 	__u32 flags;
944 
945 	/** @offset: The fake offset to use for subsequent mmap call */
946 	__u64 offset;
947 
948 	/** @reserved: Reserved */
949 	__u64 reserved[2];
950 };
951 
952 /**
953  * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
954  *
955  * The @flags can be:
956  *  - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
957  *    space of the VM to scratch page. A vm_bind would overwrite the scratch
958  *    page mapping. This flag is mutually exclusive with the
959  *    %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with an exception of on x2 and
960  *    xe3 platform.
961  *  - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
962  *    exec submissions to its exec_queues that don't have an upper time
963  *    limit on the job execution time. But exec submissions to these
964  *    don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
965  *    DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
966  *    together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
967  *    LR VMs can be created in recoverable page-fault mode using
968  *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
969  *    If that flag is omitted, the UMD can not rely on the slightly
970  *    different per-VM overcommit semantics that are enabled by
971  *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
972  *    still enable recoverable pagefaults if supported by the device.
973  *  - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
974  *    DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
975  *    demand when accessed, and also allows per-VM overcommit of memory.
976  *    The xe driver internally uses recoverable pagefaults to implement
977  *    this.
978  */
979 struct drm_xe_vm_create {
980 	/** @extensions: Pointer to the first extension struct, if any */
981 	__u64 extensions;
982 
983 #define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE	(1 << 0)
984 #define DRM_XE_VM_CREATE_FLAG_LR_MODE	        (1 << 1)
985 #define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
986 	/** @flags: Flags */
987 	__u32 flags;
988 
989 	/** @vm_id: Returned VM ID */
990 	__u32 vm_id;
991 
992 	/** @reserved: Reserved */
993 	__u64 reserved[2];
994 };
995 
996 /**
997  * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
998  */
999 struct drm_xe_vm_destroy {
1000 	/** @vm_id: VM ID */
1001 	__u32 vm_id;
1002 
1003 	/** @pad: MBZ */
1004 	__u32 pad;
1005 
1006 	/** @reserved: Reserved */
1007 	__u64 reserved[2];
1008 };
1009 
1010 /**
1011  * struct drm_xe_vm_bind_op - run bind operations
1012  *
1013  * The @op can be:
1014  *  - %DRM_XE_VM_BIND_OP_MAP
1015  *  - %DRM_XE_VM_BIND_OP_UNMAP
1016  *  - %DRM_XE_VM_BIND_OP_MAP_USERPTR
1017  *  - %DRM_XE_VM_BIND_OP_UNMAP_ALL
1018  *  - %DRM_XE_VM_BIND_OP_PREFETCH
1019  *
1020  * and the @flags can be:
1021  *  - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
1022  *    to ensure write protection
1023  *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
1024  *    MAP operation immediately rather than deferring the MAP to the page
1025  *    fault handler. This is implied on a non-faulting VM as there is no
1026  *    fault handler to defer to.
1027  *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
1028  *    tables are setup with a special bit which indicates writes are
1029  *    dropped and all reads return zero. In the future, the NULL flags
1030  *    will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
1031  *    handle MBZ, and the BO offset MBZ. This flag is intended to
1032  *    implement VK sparse bindings.
1033  *  - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
1034  *    reject the binding if the encryption key is no longer valid. This
1035  *    flag has no effect on BOs that are not marked as using PXP.
1036  *  - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
1037  *    set, no mappings are created rather the range is reserved for CPU address
1038  *    mirroring which will be populated on GPU page faults or prefetches. Only
1039  *    valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
1040  *    mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
1041  *    handle MBZ, and the BO offset MBZ.
1042  *  - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with
1043  *    %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying
1044  *    CPU address space range is unmapped (typically with munmap(2) or brk(2)).
1045  *    The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values
1046  *    that were present immediately after the &DRM_IOCTL_XE_VM_BIND.
1047  *    The reset GPU virtual address range is the intersection of the range bound
1048  *    using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range
1049  *    unmapped.
1050  *    This functionality is present to mimic the behaviour of CPU address space
1051  *    madvises set using madvise(2), which are typically reset on unmap.
1052  *    Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus
1053  *    not invoke autoreset. Neither will stack variables going out of scope.
1054  *    Therefore it's recommended to always explicitly reset the madvises when
1055  *    freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
1056  *
1057  * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
1058  *  - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
1059  *    the memory region advised by madvise.
1060  */
1061 struct drm_xe_vm_bind_op {
1062 	/** @extensions: Pointer to the first extension struct, if any */
1063 	__u64 extensions;
1064 
1065 	/**
1066 	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
1067 	 */
1068 	__u32 obj;
1069 
1070 	/**
1071 	 * @pat_index: The platform defined @pat_index to use for this mapping.
1072 	 * The index basically maps to some predefined memory attributes,
1073 	 * including things like caching, coherency, compression etc.  The exact
1074 	 * meaning of the pat_index is platform specific and defined in the
1075 	 * Bspec and PRMs.  When the KMD sets up the binding the index here is
1076 	 * encoded into the ppGTT PTE.
1077 	 *
1078 	 * For coherency the @pat_index needs to be at least 1way coherent when
1079 	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
1080 	 * will extract the coherency mode from the @pat_index and reject if
1081 	 * there is a mismatch (see note below for pre-MTL platforms).
1082 	 *
1083 	 * Note: On pre-MTL platforms there is only a caching mode and no
1084 	 * explicit coherency mode, but on such hardware there is always a
1085 	 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
1086 	 * CPU caches even with the caching mode set as uncached.  It's only the
1087 	 * display engine that is incoherent (on dgpu it must be in VRAM which
1088 	 * is always mapped as WC on the CPU). However to keep the uapi somewhat
1089 	 * consistent with newer platforms the KMD groups the different cache
1090 	 * levels into the following coherency buckets on all pre-MTL platforms:
1091 	 *
1092 	 *	ppGTT UC -> COH_NONE
1093 	 *	ppGTT WC -> COH_NONE
1094 	 *	ppGTT WT -> COH_NONE
1095 	 *	ppGTT WB -> COH_AT_LEAST_1WAY
1096 	 *
1097 	 * In practice UC/WC/WT should only ever used for scanout surfaces on
1098 	 * such platforms (or perhaps in general for dma-buf if shared with
1099 	 * another device) since it is only the display engine that is actually
1100 	 * incoherent.  Everything else should typically use WB given that we
1101 	 * have a shared-LLC.  On MTL+ this completely changes and the HW
1102 	 * defines the coherency mode as part of the @pat_index, where
1103 	 * incoherent GT access is possible.
1104 	 *
1105 	 * Note: For userptr and externally imported dma-buf the kernel expects
1106 	 * either 1WAY or 2WAY for the @pat_index.
1107 	 *
1108 	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
1109 	 * on the @pat_index. For such mappings there is no actual memory being
1110 	 * mapped (the address in the PTE is invalid), so the various PAT memory
1111 	 * attributes likely do not apply.  Simply leaving as zero is one
1112 	 * option (still a valid pat_index). Same applies to
1113 	 * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
1114 	 * there is no actual memory being mapped.
1115 	 */
1116 	__u16 pat_index;
1117 
1118 	/** @pad: MBZ */
1119 	__u16 pad;
1120 
1121 	union {
1122 		/**
1123 		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
1124 		 * ignored for unbind
1125 		 */
1126 		__u64 obj_offset;
1127 
1128 		/** @userptr: user pointer to bind on */
1129 		__u64 userptr;
1130 
1131 		/**
1132 		 * @cpu_addr_mirror_offset: Offset from GPU @addr to create
1133 		 * CPU address mirror mappings. MBZ with current level of
1134 		 * support (e.g. 1 to 1 mapping between GPU and CPU mappings
1135 		 * only supported).
1136 		 */
1137 		__s64 cpu_addr_mirror_offset;
1138 	};
1139 
1140 	/**
1141 	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
1142 	 */
1143 	__u64 range;
1144 
1145 	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
1146 	__u64 addr;
1147 
1148 #define DRM_XE_VM_BIND_OP_MAP		0x0
1149 #define DRM_XE_VM_BIND_OP_UNMAP		0x1
1150 #define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
1151 #define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
1152 #define DRM_XE_VM_BIND_OP_PREFETCH	0x4
1153 	/** @op: Bind operation to perform */
1154 	__u32 op;
1155 
1156 #define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
1157 #define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
1158 #define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
1159 #define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
1160 #define DRM_XE_VM_BIND_FLAG_CHECK_PXP	(1 << 4)
1161 #define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR	(1 << 5)
1162 #define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET	(1 << 6)
1163 	/** @flags: Bind flags */
1164 	__u32 flags;
1165 
1166 #define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC	-1
1167 	/**
1168 	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
1169 	 * It is a region instance, not a mask.
1170 	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
1171 	 */
1172 	__u32 prefetch_mem_region_instance;
1173 
1174 	/** @pad2: MBZ */
1175 	__u32 pad2;
1176 
1177 	/** @reserved: Reserved */
1178 	__u64 reserved[3];
1179 };
1180 
1181 /**
1182  * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
1183  *
1184  * Below is an example of a minimal use of @drm_xe_vm_bind to
1185  * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to
1186  * illustrate `userptr`. It can be synchronized by using the example
1187  * provided for @drm_xe_sync.
1188  *
1189  * .. code-block:: C
1190  *
1191  *     data = aligned_alloc(ALIGNMENT, BO_SIZE);
1192  *     struct drm_xe_vm_bind bind = {
1193  *         .vm_id = vm,
1194  *         .num_binds = 1,
1195  *         .bind.obj = 0,
1196  *         .bind.obj_offset = to_user_pointer(data),
1197  *         .bind.range = BO_SIZE,
1198  *         .bind.addr = BIND_ADDRESS,
1199  *         .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
1200  *         .bind.flags = 0,
1201  *         .num_syncs = 1,
1202  *         .syncs = &sync,
1203  *         .exec_queue_id = 0,
1204  *     };
1205  *     ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
1206  *
1207  */
1208 struct drm_xe_vm_bind {
1209 	/** @extensions: Pointer to the first extension struct, if any */
1210 	__u64 extensions;
1211 
1212 	/** @vm_id: The ID of the VM to bind to */
1213 	__u32 vm_id;
1214 
1215 	/**
1216 	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
1217 	 * and exec queue must have same vm_id. If zero, the default VM bind engine
1218 	 * is used.
1219 	 */
1220 	__u32 exec_queue_id;
1221 
1222 	/** @pad: MBZ */
1223 	__u32 pad;
1224 
1225 	/** @num_binds: number of binds in this IOCTL */
1226 	__u32 num_binds;
1227 
1228 	union {
1229 		/** @bind: used if num_binds == 1 */
1230 		struct drm_xe_vm_bind_op bind;
1231 
1232 		/**
1233 		 * @vector_of_binds: userptr to array of struct
1234 		 * drm_xe_vm_bind_op if num_binds > 1
1235 		 */
1236 		__u64 vector_of_binds;
1237 	};
1238 
1239 	/** @pad2: MBZ */
1240 	__u32 pad2;
1241 
1242 	/** @num_syncs: amount of syncs to wait on */
1243 	__u32 num_syncs;
1244 
1245 	/** @syncs: pointer to struct drm_xe_sync array */
1246 	__u64 syncs;
1247 
1248 	/** @reserved: Reserved */
1249 	__u64 reserved[2];
1250 };
1251 
1252 /**
1253  * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
1254  *
1255  * This ioctl supports setting the following properties via the
1256  * %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
1257  * generic @drm_xe_ext_set_property struct:
1258  *
1259  *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
1260  *    CAP_SYS_NICE is required to set a value above normal.
1261  *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
1262  *    duration in microseconds.
1263  *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
1264  *    this queue will be used with. Valid values are listed in enum
1265  *    drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
1266  *    there is no need to explicitly set that. When a queue of type
1267  *    %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
1268  *    (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if isn't already running.
1269  *    The user is expected to query the PXP status via the query ioctl (see
1270  *    %DRM_XE_DEVICE_QUERY_PXP_STATUS) and to wait for PXP to be ready before
1271  *    attempting to create a queue with this property. When a queue is created
1272  *    before PXP is ready, the ioctl will return -EBUSY if init is still in
1273  *    progress or -EIO if init failed.
1274  *    Given that going into a power-saving state kills PXP HWDRM sessions,
1275  *    runtime PM will be blocked while queues of this type are alive.
1276  *    All PXP queues will be killed if a PXP invalidation event occurs.
1277  *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP - Create a multi-queue group
1278  *    or add secondary queues to a multi-queue group.
1279  *    If the extension's 'value' field has %DRM_XE_MULTI_GROUP_CREATE flag set,
1280  *    then a new multi-queue group is created with this queue as the primary queue
1281  *    (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
1282  *    queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
1283  *    All the other non-relevant bits of extension's 'value' field while adding the
1284  *    primary or the secondary queues of the group must be set to 0.
1285  *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
1286  *    priority within the multi-queue group. Current valid priority values are 0–2
1287  *    (default is 1), with higher values indicating higher priority.
1288  *
1289  * The example below shows how to use @drm_xe_exec_queue_create to create
1290  * a simple exec_queue (no parallel submission) of class
1291  * &DRM_XE_ENGINE_CLASS_RENDER.
1292  *
1293  * .. code-block:: C
1294  *
1295  *     struct drm_xe_engine_class_instance instance = {
1296  *         .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
1297  *     };
1298  *     struct drm_xe_exec_queue_create exec_queue_create = {
1299  *          .extensions = 0,
1300  *          .vm_id = vm,
1301  *          .num_bb_per_exec = 1,
1302  *          .num_eng_per_bb = 1,
1303  *          .instances = to_user_pointer(&instance),
1304  *     };
1305  *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
1306  *
1307  *     Allow users to provide a hint to kernel for cases demanding low latency
1308  *     profile. Please note it will have impact on power consumption. User can
1309  *     indicate low latency hint with flag while creating exec queue as
1310  *     mentioned below,
1311  *
1312  *     struct drm_xe_exec_queue_create exec_queue_create = {
1313  *          .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
1314  *          .extensions = 0,
1315  *          .vm_id = vm,
1316  *          .num_bb_per_exec = 1,
1317  *          .num_eng_per_bb = 1,
1318  *          .instances = to_user_pointer(&instance),
1319  *     };
1320  *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
1321  *
1322  */
1323 struct drm_xe_exec_queue_create {
1324 #define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY		0
1325 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
1326 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
1327 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE		2
1328 #define   DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE		3
1329 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP		4
1330 #define     DRM_XE_MULTI_GROUP_CREATE				(1ull << 63)
1331 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY	5
1332 	/** @extensions: Pointer to the first extension struct, if any */
1333 	__u64 extensions;
1334 
1335 	/** @width: submission width (number BB per exec) for this exec queue */
1336 	__u16 width;
1337 
1338 	/** @num_placements: number of valid placements for this exec queue */
1339 	__u16 num_placements;
1340 
1341 	/** @vm_id: VM to use for this exec queue */
1342 	__u32 vm_id;
1343 
1344 #define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT	(1 << 0)
1345 	/** @flags: flags to use for this exec queue */
1346 	__u32 flags;
1347 
1348 	/** @exec_queue_id: Returned exec queue ID */
1349 	__u32 exec_queue_id;
1350 
1351 	/**
1352 	 * @instances: user pointer to a 2-d array of struct
1353 	 * drm_xe_engine_class_instance
1354 	 *
1355 	 * length = width (i) * num_placements (j)
1356 	 * index = j + i * width
1357 	 */
1358 	__u64 instances;
1359 
1360 	/** @reserved: Reserved */
1361 	__u64 reserved[2];
1362 };
1363 
1364 /**
1365  * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
1366  */
1367 struct drm_xe_exec_queue_destroy {
1368 	/** @exec_queue_id: Exec queue ID */
1369 	__u32 exec_queue_id;
1370 
1371 	/** @pad: MBZ */
1372 	__u32 pad;
1373 
1374 	/** @reserved: Reserved */
1375 	__u64 reserved[2];
1376 };
1377 
1378 /**
1379  * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
1380  *
1381  * The @property can be:
1382  *  - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
1383  */
1384 struct drm_xe_exec_queue_get_property {
1385 	/** @extensions: Pointer to the first extension struct, if any */
1386 	__u64 extensions;
1387 
1388 	/** @exec_queue_id: Exec queue ID */
1389 	__u32 exec_queue_id;
1390 
1391 #define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
1392 	/** @property: property to get */
1393 	__u32 property;
1394 
1395 	/** @value: property value */
1396 	__u64 value;
1397 
1398 	/** @reserved: Reserved */
1399 	__u64 reserved[2];
1400 };
1401 
1402 /**
1403  * struct drm_xe_sync - sync object
1404  *
1405  * The @type can be:
1406  *  - %DRM_XE_SYNC_TYPE_SYNCOBJ
1407  *  - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
1408  *  - %DRM_XE_SYNC_TYPE_USER_FENCE
1409  *
1410  * and the @flags can be:
1411  *  - %DRM_XE_SYNC_FLAG_SIGNAL
1412  *
1413  * A minimal use of @drm_xe_sync looks like this:
1414  *
1415  * .. code-block:: C
1416  *
1417  *     struct drm_xe_sync sync = {
1418  *         .flags = DRM_XE_SYNC_FLAG_SIGNAL,
1419  *         .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
1420  *     };
1421  *     struct drm_syncobj_create syncobj_create = { 0 };
1422  *     ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
1423  *     sync.handle = syncobj_create.handle;
1424  *         ...
1425  *         use of &sync in drm_xe_exec or drm_xe_vm_bind
1426  *         ...
1427  *     struct drm_syncobj_wait wait = {
1428  *         .handles = &sync.handle,
1429  *         .timeout_nsec = INT64_MAX,
1430  *         .count_handles = 1,
1431  *         .flags = 0,
1432  *         .first_signaled = 0,
1433  *         .pad = 0,
1434  *     };
1435  *     ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
1436  */
1437 struct drm_xe_sync {
1438 	/** @extensions: Pointer to the first extension struct, if any */
1439 	__u64 extensions;
1440 
1441 #define DRM_XE_SYNC_TYPE_SYNCOBJ		0x0
1442 #define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ	0x1
1443 #define DRM_XE_SYNC_TYPE_USER_FENCE		0x2
1444 	/** @type: Type of the this sync object */
1445 	__u32 type;
1446 
1447 #define DRM_XE_SYNC_FLAG_SIGNAL	(1 << 0)
1448 	/** @flags: Sync Flags */
1449 	__u32 flags;
1450 
1451 	union {
1452 		/** @handle: Handle for the object */
1453 		__u32 handle;
1454 
1455 		/**
1456 		 * @addr: Address of user fence. When sync is passed in via exec
1457 		 * IOCTL this is a GPU address in the VM. When sync passed in via
1458 		 * VM bind IOCTL this is a user pointer. In either case, it is
1459 		 * the users responsibility that this address is present and
1460 		 * mapped when the user fence is signalled. Must be qword
1461 		 * aligned.
1462 		 */
1463 		__u64 addr;
1464 	};
1465 
1466 	/**
1467 	 * @timeline_value: Input for the timeline sync object. Needs to be
1468 	 * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
1469 	 */
1470 	__u64 timeline_value;
1471 
1472 	/** @reserved: Reserved */
1473 	__u64 reserved[2];
1474 };
1475 
1476 /**
1477  * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
1478  *
1479  * This is an example to use @drm_xe_exec for execution of the object
1480  * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
1481  * (see example in @drm_xe_exec_queue_create). It can be synchronized
1482  * by using the example provided for @drm_xe_sync.
1483  *
1484  * .. code-block:: C
1485  *
1486  *     struct drm_xe_exec exec = {
1487  *         .exec_queue_id = exec_queue,
1488  *         .syncs = &sync,
1489  *         .num_syncs = 1,
1490  *         .address = BIND_ADDRESS,
1491  *         .num_batch_buffer = 1,
1492  *     };
1493  *     ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
1494  *
1495  */
1496 struct drm_xe_exec {
1497 	/** @extensions: Pointer to the first extension struct, if any */
1498 	__u64 extensions;
1499 
1500 	/** @exec_queue_id: Exec queue ID for the batch buffer */
1501 	__u32 exec_queue_id;
1502 
1503 #define DRM_XE_MAX_SYNCS 1024
1504 	/** @num_syncs: Amount of struct drm_xe_sync in array. */
1505 	__u32 num_syncs;
1506 
1507 	/** @syncs: Pointer to struct drm_xe_sync array. */
1508 	__u64 syncs;
1509 
1510 	/**
1511 	 * @address: address of batch buffer if num_batch_buffer == 1 or an
1512 	 * array of batch buffer addresses
1513 	 */
1514 	__u64 address;
1515 
1516 	/**
1517 	 * @num_batch_buffer: number of batch buffer in this exec, must match
1518 	 * the width of the engine
1519 	 */
1520 	__u16 num_batch_buffer;
1521 
1522 	/** @pad: MBZ */
1523 	__u16 pad[3];
1524 
1525 	/** @reserved: Reserved */
1526 	__u64 reserved[2];
1527 };
1528 
1529 /**
1530  * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
1531  *
1532  * Wait on user fence, XE will wake-up on every HW engine interrupt in the
1533  * instances list and check if user fence is complete::
1534  *
1535  *	(*addr & MASK) OP (VALUE & MASK)
1536  *
1537  * Returns to user on user fence completion or timeout.
1538  *
1539  * The @op can be:
1540  *  - %DRM_XE_UFENCE_WAIT_OP_EQ
1541  *  - %DRM_XE_UFENCE_WAIT_OP_NEQ
1542  *  - %DRM_XE_UFENCE_WAIT_OP_GT
1543  *  - %DRM_XE_UFENCE_WAIT_OP_GTE
1544  *  - %DRM_XE_UFENCE_WAIT_OP_LT
1545  *  - %DRM_XE_UFENCE_WAIT_OP_LTE
1546  *
1547  * and the @flags can be:
1548  *  - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
1549  *  - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
1550  *
1551  * The @mask values can be for example:
1552  *  - 0xffu for u8
1553  *  - 0xffffu for u16
1554  *  - 0xffffffffu for u32
1555  *  - 0xffffffffffffffffu for u64
1556  */
1557 struct drm_xe_wait_user_fence {
1558 	/** @extensions: Pointer to the first extension struct, if any */
1559 	__u64 extensions;
1560 
1561 	/**
1562 	 * @addr: user pointer address to wait on, must qword aligned
1563 	 */
1564 	__u64 addr;
1565 
1566 #define DRM_XE_UFENCE_WAIT_OP_EQ	0x0
1567 #define DRM_XE_UFENCE_WAIT_OP_NEQ	0x1
1568 #define DRM_XE_UFENCE_WAIT_OP_GT	0x2
1569 #define DRM_XE_UFENCE_WAIT_OP_GTE	0x3
1570 #define DRM_XE_UFENCE_WAIT_OP_LT	0x4
1571 #define DRM_XE_UFENCE_WAIT_OP_LTE	0x5
1572 	/** @op: wait operation (type of comparison) */
1573 	__u16 op;
1574 
1575 #define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
1576 	/** @flags: wait flags */
1577 	__u16 flags;
1578 
1579 	/** @pad: MBZ */
1580 	__u32 pad;
1581 
1582 	/** @value: compare value */
1583 	__u64 value;
1584 
1585 	/** @mask: comparison mask */
1586 	__u64 mask;
1587 
1588 	/**
1589 	 * @timeout: how long to wait before bailing, value in nanoseconds.
1590 	 * Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
1591 	 * it contains timeout expressed in nanoseconds to wait (fence will
1592 	 * expire at now() + timeout).
1593 	 * When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flat is set (absolute timeout) wait
1594 	 * will end at timeout (uses system MONOTONIC_CLOCK).
1595 	 * Passing negative timeout leads to neverending wait.
1596 	 *
1597 	 * On relative timeout this value is updated with timeout left
1598 	 * (for restarting the call in case of signal delivery).
1599 	 * On absolute timeout this value stays intact (restarted call still
1600 	 * expire at the same point of time).
1601 	 */
1602 	__s64 timeout;
1603 
1604 	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
1605 	__u32 exec_queue_id;
1606 
1607 	/** @pad2: MBZ */
1608 	__u32 pad2;
1609 
1610 	/** @reserved: Reserved */
1611 	__u64 reserved[2];
1612 };
1613 
1614 /**
1615  * enum drm_xe_observation_type - Observation stream types
1616  */
1617 enum drm_xe_observation_type {
1618 	/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
1619 	DRM_XE_OBSERVATION_TYPE_OA,
1620 	/** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
1621 	DRM_XE_OBSERVATION_TYPE_EU_STALL,
1622 };
1623 
1624 /**
1625  * enum drm_xe_observation_op - Observation stream ops
1626  */
1627 enum drm_xe_observation_op {
1628 	/** @DRM_XE_OBSERVATION_OP_STREAM_OPEN: Open an observation stream */
1629 	DRM_XE_OBSERVATION_OP_STREAM_OPEN,
1630 
1631 	/** @DRM_XE_OBSERVATION_OP_ADD_CONFIG: Add observation stream config */
1632 	DRM_XE_OBSERVATION_OP_ADD_CONFIG,
1633 
1634 	/** @DRM_XE_OBSERVATION_OP_REMOVE_CONFIG: Remove observation stream config */
1635 	DRM_XE_OBSERVATION_OP_REMOVE_CONFIG,
1636 };
1637 
1638 /**
1639  * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
1640  *
1641  * The observation layer enables multiplexing observation streams of
1642  * multiple types. The actual params for a particular stream operation are
1643  * supplied via the @param pointer (use __copy_from_user to get these
1644  * params).
1645  */
1646 struct drm_xe_observation_param {
1647 	/** @extensions: Pointer to the first extension struct, if any */
1648 	__u64 extensions;
1649 	/** @observation_type: observation stream type, of enum @drm_xe_observation_type */
1650 	__u64 observation_type;
1651 	/** @observation_op: observation stream op, of enum @drm_xe_observation_op */
1652 	__u64 observation_op;
1653 	/** @param: Pointer to actual stream params */
1654 	__u64 param;
1655 };
1656 
1657 /**
1658  * enum drm_xe_observation_ioctls - Observation stream fd ioctl's
1659  *
1660  * Information exchanged between userspace and kernel for observation fd
1661  * ioctl's is stream type specific
1662  */
1663 enum drm_xe_observation_ioctls {
1664 	/** @DRM_XE_OBSERVATION_IOCTL_ENABLE: Enable data capture for an observation stream */
1665 	DRM_XE_OBSERVATION_IOCTL_ENABLE = _IO('i', 0x0),
1666 
1667 	/** @DRM_XE_OBSERVATION_IOCTL_DISABLE: Disable data capture for a observation stream */
1668 	DRM_XE_OBSERVATION_IOCTL_DISABLE = _IO('i', 0x1),
1669 
1670 	/** @DRM_XE_OBSERVATION_IOCTL_CONFIG: Change observation stream configuration */
1671 	DRM_XE_OBSERVATION_IOCTL_CONFIG = _IO('i', 0x2),
1672 
1673 	/** @DRM_XE_OBSERVATION_IOCTL_STATUS: Return observation stream status */
1674 	DRM_XE_OBSERVATION_IOCTL_STATUS = _IO('i', 0x3),
1675 
1676 	/** @DRM_XE_OBSERVATION_IOCTL_INFO: Return observation stream info */
1677 	DRM_XE_OBSERVATION_IOCTL_INFO = _IO('i', 0x4),
1678 };
1679 
1680 /**
1681  * enum drm_xe_oa_unit_type - OA unit types
1682  */
1683 enum drm_xe_oa_unit_type {
1684 	/**
1685 	 * @DRM_XE_OA_UNIT_TYPE_OAG: OAG OA unit. OAR/OAC are considered
1686 	 * sub-types of OAG. For OAR/OAC, use OAG.
1687 	 */
1688 	DRM_XE_OA_UNIT_TYPE_OAG,
1689 
1690 	/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
1691 	DRM_XE_OA_UNIT_TYPE_OAM,
1692 
1693 	/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
1694 	DRM_XE_OA_UNIT_TYPE_OAM_SAG,
1695 
1696 	/** @DRM_XE_OA_UNIT_TYPE_MERT: MERT OA unit */
1697 	DRM_XE_OA_UNIT_TYPE_MERT,
1698 };
1699 
1700 /**
1701  * struct drm_xe_oa_unit - describe OA unit
1702  */
1703 struct drm_xe_oa_unit {
1704 	/** @extensions: Pointer to the first extension struct, if any */
1705 	__u64 extensions;
1706 
1707 	/** @oa_unit_id: OA unit ID */
1708 	__u32 oa_unit_id;
1709 
1710 	/** @oa_unit_type: OA unit type of @drm_xe_oa_unit_type */
1711 	__u32 oa_unit_type;
1712 
1713 	/** @capabilities: OA capabilities bit-mask */
1714 	__u64 capabilities;
1715 #define DRM_XE_OA_CAPS_BASE		(1 << 0)
1716 #define DRM_XE_OA_CAPS_SYNCS		(1 << 1)
1717 #define DRM_XE_OA_CAPS_OA_BUFFER_SIZE	(1 << 2)
1718 #define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS	(1 << 3)
1719 #define DRM_XE_OA_CAPS_OAM		(1 << 4)
1720 #define DRM_XE_OA_CAPS_OA_UNIT_GT_ID	(1 << 5)
1721 
1722 	/** @oa_timestamp_freq: OA timestamp freq */
1723 	__u64 oa_timestamp_freq;
1724 
1725 	/** @gt_id: gt id for this OA unit */
1726 	__u16 gt_id;
1727 
1728 	/** @reserved1: MBZ */
1729 	__u16 reserved1[3];
1730 
1731 	/** @reserved: MBZ */
1732 	__u64 reserved[3];
1733 
1734 	/** @num_engines: number of engines in @eci array */
1735 	__u64 num_engines;
1736 
1737 	/** @eci: engines attached to this OA unit */
1738 	struct drm_xe_engine_class_instance eci[];
1739 };
1740 
1741 /**
1742  * struct drm_xe_query_oa_units - describe OA units
1743  *
1744  * If a query is made with a struct drm_xe_device_query where .query
1745  * is equal to DRM_XE_DEVICE_QUERY_OA_UNITS, then the reply uses struct
1746  * drm_xe_query_oa_units in .data.
1747  *
1748  * OA unit properties for all OA units can be accessed using a code block
1749  * such as the one below:
1750  *
1751  * .. code-block:: C
1752  *
1753  *	struct drm_xe_query_oa_units *qoa;
1754  *	struct drm_xe_oa_unit *oau;
1755  *	u8 *poau;
1756  *
1757  *	// malloc qoa and issue DRM_XE_DEVICE_QUERY_OA_UNITS. Then:
1758  *	poau = (u8 *)&qoa->oa_units[0];
1759  *	for (int i = 0; i < qoa->num_oa_units; i++) {
1760  *		oau = (struct drm_xe_oa_unit *)poau;
1761  *		// Access 'struct drm_xe_oa_unit' fields here
1762  *		poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
1763  *	}
1764  */
1765 struct drm_xe_query_oa_units {
1766 	/** @extensions: Pointer to the first extension struct, if any */
1767 	__u64 extensions;
1768 	/** @num_oa_units: number of OA units returned in oau[] */
1769 	__u32 num_oa_units;
1770 	/** @pad: MBZ */
1771 	__u32 pad;
1772 	/**
1773 	 * @oa_units: struct @drm_xe_oa_unit array returned for this device.
1774 	 * Written below as a u64 array to avoid problems with nested flexible
1775 	 * arrays with some compilers
1776 	 */
1777 	__u64 oa_units[];
1778 };
1779 
1780 /**
1781  * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
1782  * 52198/60942
1783  */
1784 enum drm_xe_oa_format_type {
1785 	/** @DRM_XE_OA_FMT_TYPE_OAG: OAG report format */
1786 	DRM_XE_OA_FMT_TYPE_OAG,
1787 	/** @DRM_XE_OA_FMT_TYPE_OAR: OAR report format */
1788 	DRM_XE_OA_FMT_TYPE_OAR,
1789 	/** @DRM_XE_OA_FMT_TYPE_OAM: OAM report format */
1790 	DRM_XE_OA_FMT_TYPE_OAM,
1791 	/** @DRM_XE_OA_FMT_TYPE_OAC: OAC report format */
1792 	DRM_XE_OA_FMT_TYPE_OAC,
1793 	/** @DRM_XE_OA_FMT_TYPE_OAM_MPEC: OAM SAMEDIA or OAM MPEC report format */
1794 	DRM_XE_OA_FMT_TYPE_OAM_MPEC,
1795 	/** @DRM_XE_OA_FMT_TYPE_PEC: PEC report format */
1796 	DRM_XE_OA_FMT_TYPE_PEC,
1797 };
1798 
1799 /**
1800  * enum drm_xe_oa_property_id - OA stream property id's
1801  *
1802  * Stream params are specified as a chain of @drm_xe_ext_set_property
1803  * struct's, with @property values from enum @drm_xe_oa_property_id and
1804  * @drm_xe_user_extension base.name set to @DRM_XE_OA_EXTENSION_SET_PROPERTY.
1805  * @param field in struct @drm_xe_observation_param points to the first
1806  * @drm_xe_ext_set_property struct.
1807  *
1808  * Exactly the same mechanism is also used for stream reconfiguration using the
1809  * @DRM_XE_OBSERVATION_IOCTL_CONFIG observation stream fd ioctl, though only a
1810  * subset of properties below can be specified for stream reconfiguration.
1811  */
1812 enum drm_xe_oa_property_id {
1813 #define DRM_XE_OA_EXTENSION_SET_PROPERTY	0
1814 	/**
1815 	 * @DRM_XE_OA_PROPERTY_OA_UNIT_ID: ID of the OA unit on which to open
1816 	 * the OA stream, see @oa_unit_id in 'struct
1817 	 * drm_xe_query_oa_units'. Defaults to 0 if not provided.
1818 	 */
1819 	DRM_XE_OA_PROPERTY_OA_UNIT_ID = 1,
1820 
1821 	/**
1822 	 * @DRM_XE_OA_PROPERTY_SAMPLE_OA: A value of 1 requests inclusion of raw
1823 	 * OA unit reports or stream samples in a global buffer attached to an
1824 	 * OA unit.
1825 	 */
1826 	DRM_XE_OA_PROPERTY_SAMPLE_OA,
1827 
1828 	/**
1829 	 * @DRM_XE_OA_PROPERTY_OA_METRIC_SET: OA metrics defining contents of OA
1830 	 * reports, previously added via @DRM_XE_OBSERVATION_OP_ADD_CONFIG.
1831 	 */
1832 	DRM_XE_OA_PROPERTY_OA_METRIC_SET,
1833 
1834 	/** @DRM_XE_OA_PROPERTY_OA_FORMAT: OA counter report format */
1835 	DRM_XE_OA_PROPERTY_OA_FORMAT,
1836 	/*
1837 	 * OA_FORMAT's are specified the same way as in PRM/Bspec 52198/60942,
1838 	 * in terms of the following quantities: a. enum @drm_xe_oa_format_type
1839 	 * b. Counter select c. Counter size and d. BC report. Also refer to the
1840 	 * oa_formats array in drivers/gpu/drm/xe/xe_oa.c.
1841 	 */
1842 #define DRM_XE_OA_FORMAT_MASK_FMT_TYPE		(0xffu << 0)
1843 #define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL	(0xffu << 8)
1844 #define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE	(0xffu << 16)
1845 #define DRM_XE_OA_FORMAT_MASK_BC_REPORT		(0xffu << 24)
1846 
1847 	/**
1848 	 * @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
1849 	 * sampling with sampling frequency proportional to 2^(period_exponent + 1)
1850 	 */
1851 	DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT,
1852 
1853 	/**
1854 	 * @DRM_XE_OA_PROPERTY_OA_DISABLED: A value of 1 will open the OA
1855 	 * stream in a DISABLED state (see @DRM_XE_OBSERVATION_IOCTL_ENABLE).
1856 	 */
1857 	DRM_XE_OA_PROPERTY_OA_DISABLED,
1858 
1859 	/**
1860 	 * @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID: Open the stream for a specific
1861 	 * @exec_queue_id. OA queries can be executed on this exec queue.
1862 	 */
1863 	DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID,
1864 
1865 	/**
1866 	 * @DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE: Optional engine instance to
1867 	 * pass along with @DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID or will default to 0.
1868 	 */
1869 	DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE,
1870 
1871 	/**
1872 	 * @DRM_XE_OA_PROPERTY_NO_PREEMPT: Allow preemption and timeslicing
1873 	 * to be disabled for the stream exec queue.
1874 	 */
1875 	DRM_XE_OA_PROPERTY_NO_PREEMPT,
1876 
1877 	/**
1878 	 * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array
1879 	 * specified in @DRM_XE_OA_PROPERTY_SYNCS
1880 	 */
1881 	DRM_XE_OA_PROPERTY_NUM_SYNCS,
1882 
1883 	/**
1884 	 * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array
1885 	 * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA
1886 	 * configuration will wait till input fences signal. Output fences
1887 	 * will signal after the new OA configuration takes effect. For
1888 	 * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar
1889 	 * to the VM bind case.
1890 	 */
1891 	DRM_XE_OA_PROPERTY_SYNCS,
1892 
1893 	/**
1894 	 * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be
1895 	 * allocated by the driver in bytes. Supported sizes are powers of
1896 	 * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA
1897 	 * buffer is allocated by default.
1898 	 */
1899 	DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE,
1900 
1901 	/**
1902 	 * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait
1903 	 * for before unblocking poll or read
1904 	 */
1905 	DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
1906 };
1907 
1908 /**
1909  * struct drm_xe_oa_config - OA metric configuration
1910  *
1911  * Multiple OA configs can be added using @DRM_XE_OBSERVATION_OP_ADD_CONFIG. A
1912  * particular config can be specified when opening an OA stream using
1913  * @DRM_XE_OA_PROPERTY_OA_METRIC_SET property.
1914  */
1915 struct drm_xe_oa_config {
1916 	/** @extensions: Pointer to the first extension struct, if any */
1917 	__u64 extensions;
1918 
1919 	/** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
1920 	char uuid[36];
1921 
1922 	/** @n_regs: Number of regs in @regs_ptr */
1923 	__u32 n_regs;
1924 
1925 	/**
1926 	 * @regs_ptr: Pointer to (register address, value) pairs for OA config
1927 	 * registers. Expected length of buffer is: (2 * sizeof(u32) * @n_regs).
1928 	 */
1929 	__u64 regs_ptr;
1930 };
1931 
1932 /**
1933  * struct drm_xe_oa_stream_status - OA stream status returned from
1934  * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl. Userspace can
1935  * call the ioctl to query stream status in response to EIO errno from
1936  * observation fd read().
1937  */
1938 struct drm_xe_oa_stream_status {
1939 	/** @extensions: Pointer to the first extension struct, if any */
1940 	__u64 extensions;
1941 
1942 	/** @oa_status: OA stream status (see Bspec 46717/61226) */
1943 	__u64 oa_status;
1944 #define DRM_XE_OASTATUS_MMIO_TRG_Q_FULL		(1 << 3)
1945 #define DRM_XE_OASTATUS_COUNTER_OVERFLOW	(1 << 2)
1946 #define DRM_XE_OASTATUS_BUFFER_OVERFLOW		(1 << 1)
1947 #define DRM_XE_OASTATUS_REPORT_LOST		(1 << 0)
1948 
1949 	/** @reserved: reserved for future use */
1950 	__u64 reserved[3];
1951 };
1952 
1953 /**
1954  * struct drm_xe_oa_stream_info - OA stream info returned from
1955  * @DRM_XE_OBSERVATION_IOCTL_INFO observation stream fd ioctl
1956  */
1957 struct drm_xe_oa_stream_info {
1958 	/** @extensions: Pointer to the first extension struct, if any */
1959 	__u64 extensions;
1960 
1961 	/** @oa_buf_size: OA buffer size */
1962 	__u64 oa_buf_size;
1963 
1964 	/** @reserved: reserved for future use */
1965 	__u64 reserved[3];
1966 };
1967 
1968 /**
1969  * enum drm_xe_pxp_session_type - Supported PXP session types.
1970  *
1971  * We currently only support HWDRM sessions, which are used for protected
1972  * content that ends up being displayed, but the HW supports multiple types, so
1973  * we might extend support in the future.
1974  */
1975 enum drm_xe_pxp_session_type {
1976 	/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
1977 	DRM_XE_PXP_TYPE_NONE = 0,
1978 	/**
1979 	 * @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
1980 	 * up on the display.
1981 	 */
1982 	DRM_XE_PXP_TYPE_HWDRM = 1,
1983 };
1984 
1985 /* ID of the protected content session managed by Xe when PXP is active */
1986 #define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
1987 
1988 /**
1989  * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
1990  *
1991  * These properties are passed to the driver at open as a chain of
1992  * @drm_xe_ext_set_property structures with @property set to these
1993  * properties' enums and @value set to the corresponding values of these
1994  * properties. @drm_xe_user_extension base.name should be set to
1995  * @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
1996  *
1997  * With the file descriptor obtained from open, user space must enable
1998  * the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
1999  * calling read(). EIO errno from read() indicates HW dropped data
2000  * due to full buffer.
2001  */
2002 enum drm_xe_eu_stall_property_id {
2003 #define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY		0
2004 	/**
2005 	 * @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
2006 	 * EU stall data will be captured.
2007 	 */
2008 	DRM_XE_EU_STALL_PROP_GT_ID = 1,
2009 
2010 	/**
2011 	 * @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
2012 	 * GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
2013 	 */
2014 	DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
2015 
2016 	/**
2017 	 * @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
2018 	 * EU stall data reports to be present in the kernel buffer
2019 	 * before unblocking a blocked poll or read.
2020 	 */
2021 	DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
2022 };
2023 
2024 /**
2025  * struct drm_xe_query_eu_stall - Information about EU stall sampling.
2026  *
2027  * If a query is made with a struct @drm_xe_device_query where .query
2028  * is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
2029  * struct @drm_xe_query_eu_stall in .data.
2030  */
2031 struct drm_xe_query_eu_stall {
2032 	/** @extensions: Pointer to the first extension struct, if any */
2033 	__u64 extensions;
2034 
2035 	/** @capabilities: EU stall capabilities bit-mask */
2036 	__u64 capabilities;
2037 #define DRM_XE_EU_STALL_CAPS_BASE		(1 << 0)
2038 
2039 	/** @record_size: size of each EU stall data record */
2040 	__u64 record_size;
2041 
2042 	/** @per_xecore_buf_size: internal per XeCore buffer size */
2043 	__u64 per_xecore_buf_size;
2044 
2045 	/** @reserved: Reserved */
2046 	__u64 reserved[5];
2047 
2048 	/** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
2049 	__u64 num_sampling_rates;
2050 
2051 	/**
2052 	 * @sampling_rates: Flexible array of sampling rates
2053 	 * sorted in the fastest to slowest order.
2054 	 * Sampling rates are specified in GPU clock cycles.
2055 	 */
2056 	__u64 sampling_rates[];
2057 };
2058 
2059 /**
2060  * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
2061  *
2062  * This structure is used to set memory attributes for a virtual address range
2063  * in a VM. The type of attribute is specified by @type, and the corresponding
2064  * union member is used to provide additional parameters for @type.
2065  *
2066  * Supported attribute types:
2067  *  - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
2068  *  - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
2069  *  - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
2070  *
2071  * Example:
2072  *
2073  * .. code-block:: C
2074  *
2075  *    struct drm_xe_madvise madvise = {
2076  *         .vm_id = vm_id,
2077  *         .start = 0x100000,
2078  *         .range = 0x2000,
2079  *         .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
2080  *         .atomic_val = DRM_XE_ATOMIC_DEVICE,
2081  *    };
2082  *
2083  *    ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
2084  *
2085  */
2086 struct drm_xe_madvise {
2087 	/** @extensions: Pointer to the first extension struct, if any */
2088 	__u64 extensions;
2089 
2090 	/** @start: start of the virtual address range */
2091 	__u64 start;
2092 
2093 	/** @range: size of the virtual address range */
2094 	__u64 range;
2095 
2096 	/** @vm_id: vm_id of the virtual range */
2097 	__u32 vm_id;
2098 
2099 #define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC	0
2100 #define DRM_XE_MEM_RANGE_ATTR_ATOMIC		1
2101 #define DRM_XE_MEM_RANGE_ATTR_PAT		2
2102 	/** @type: type of attribute */
2103 	__u32 type;
2104 
2105 	union {
2106 		/**
2107 		 * @preferred_mem_loc: preferred memory location
2108 		 *
2109 		 * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
2110 		 *
2111 		 * Supported values for @preferred_mem_loc.devmem_fd:
2112 		 *  - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of fault tile as preferred loc
2113 		 *  - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
2114 		 *
2115 		 * Supported values for @preferred_mem_loc.migration_policy:
2116 		 *  - DRM_XE_MIGRATE_ALL_PAGES
2117 		 *  - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
2118 		 */
2119 		struct {
2120 #define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE	0
2121 #define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM	-1
2122 			/**
2123 			 * @preferred_mem_loc.devmem_fd:
2124 			 * Device file-descriptor of the device where the
2125 			 * preferred memory is located, or one of the
2126 			 * above special values. Please also see
2127 			 * @preferred_mem_loc.region_instance below.
2128 			 */
2129 			__u32 devmem_fd;
2130 
2131 #define DRM_XE_MIGRATE_ALL_PAGES		0
2132 #define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES	1
2133 			/** @preferred_mem_loc.migration_policy: Page migration policy */
2134 			__u16 migration_policy;
2135 
2136 			/**
2137 			 * @preferred_mem_loc.region_instance : Region instance.
2138 			 * MBZ if @devmem_fd <= &DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE.
2139 			 * Otherwise should point to the desired device
2140 			 * VRAM instance of the device indicated by
2141 			 * @preferred_mem_loc.devmem_fd.
2142 			 */
2143 			__u16 region_instance;
2144 
2145 			/** @preferred_mem_loc.reserved : Reserved */
2146 			__u64 reserved;
2147 		} preferred_mem_loc;
2148 
2149 		/**
2150 		 * @atomic: Atomic access policy
2151 		 *
2152 		 * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
2153 		 *
2154 		 * Supported values for @atomic.val:
2155 		 *  - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
2156 		 *    Support both GPU and CPU atomic operations for system allocator.
2157 		 *    Support GPU atomic operations for normal(bo) allocator.
2158 		 *  - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations.
2159 		 *  - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations.
2160 		 *  - DRM_XE_ATOMIC_CPU: Support CPU atomic only, no GPU atomics supported.
2161 		 */
2162 		struct {
2163 #define DRM_XE_ATOMIC_UNDEFINED	0
2164 #define DRM_XE_ATOMIC_DEVICE	1
2165 #define DRM_XE_ATOMIC_GLOBAL	2
2166 #define DRM_XE_ATOMIC_CPU	3
2167 			/** @atomic.val: value of atomic operation */
2168 			__u32 val;
2169 
2170 			/** @atomic.pad: MBZ */
2171 			__u32 pad;
2172 
2173 			/** @atomic.reserved: Reserved */
2174 			__u64 reserved;
2175 		} atomic;
2176 
2177 		/**
2178 		 * @pat_index: Page attribute table index
2179 		 *
2180 		 * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
2181 		 */
2182 		struct {
2183 			/** @pat_index.val: PAT index value */
2184 			__u32 val;
2185 
2186 			/** @pat_index.pad: MBZ */
2187 			__u32 pad;
2188 
2189 			/** @pat_index.reserved: Reserved */
2190 			__u64 reserved;
2191 		} pat_index;
2192 	};
2193 
2194 	/** @reserved: Reserved */
2195 	__u64 reserved[2];
2196 };
2197 
2198 /**
2199  * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGES_ATTRS
2200  *
2201  * This structure is provided by userspace and filled by KMD in response to the
2202  * DRM_IOCTL_XE_VM_QUERY_MEM_RANGES_ATTRS ioctl. It describes memory attributes of
2203  * a memory ranges within a user specified address range in a VM.
2204  *
2205  * The structure includes information such as atomic access policy,
2206  * page attribute table (PAT) index, and preferred memory location.
2207  * Userspace allocates an array of these structures and passes a pointer to the
2208  * ioctl to retrieve attributes for each memory ranges
2209  *
2210  * @extensions: Pointer to the first extension struct, if any
2211  * @start: Start address of the memory range
2212  * @end: End address of the virtual memory range
2213  *
2214  */
2215 struct drm_xe_mem_range_attr {
2216 	 /** @extensions: Pointer to the first extension struct, if any */
2217 	__u64 extensions;
2218 
2219 	/** @start: start of the memory range */
2220 	__u64 start;
2221 
2222 	/** @end: end of the memory range */
2223 	__u64 end;
2224 
2225 	/** @preferred_mem_loc: preferred memory location */
2226 	struct {
2227 		/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
2228 		__u32 devmem_fd;
2229 
2230 		/** @preferred_mem_loc.migration_policy: Page migration policy */
2231 		__u32 migration_policy;
2232 	} preferred_mem_loc;
2233 
2234 	/** @atomic: Atomic access policy */
2235 	struct {
2236 		/** @atomic.val: atomic attribute */
2237 		__u32 val;
2238 
2239 		/** @atomic.reserved: Reserved */
2240 		__u32 reserved;
2241 	} atomic;
2242 
2243 	 /** @pat_index: Page attribute table index */
2244 	struct {
2245 		/** @pat_index.val: PAT index */
2246 		__u32 val;
2247 
2248 		/** @pat_index.reserved: Reserved */
2249 		__u32 reserved;
2250 	} pat_index;
2251 
2252 	/** @reserved: Reserved */
2253 	__u64 reserved[2];
2254 };
2255 
2256 /**
2257  * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_ATTRIBUTES
2258  *
2259  * This structure is used to query memory attributes of memory regions
2260  * within a user specified address range in a VM. It provides detailed
2261  * information about each memory range, including atomic access policy,
2262  * page attribute table (PAT) index, and preferred memory location.
2263  *
2264  * Userspace first calls the ioctl with @num_mem_ranges = 0,
2265  * @sizeof_mem_ranges_attr = 0 and @vector_of_vma_mem_attr = NULL to retrieve
2266  * the number of memory regions and size of each memory range attribute.
2267  * Then, it allocates a buffer of that size and calls the ioctl again to fill
2268  * the buffer with memory range attributes.
2269  *
2270  * If second call fails with -ENOSPC, it means memory ranges changed between
2271  * first call and now, retry IOCTL again with @num_mem_ranges = 0,
2272  * @sizeof_mem_ranges_attr = 0 and @vector_of_vma_mem_attr = NULL followed by
2273  * Second ioctl call.
2274  *
2275  * Example:
2276  *
2277  * .. code-block:: C
2278  *
2279  *    struct drm_xe_vm_query_mem_range_attr query = {
2280  *         .vm_id = vm_id,
2281  *         .start = 0x100000,
2282  *         .range = 0x2000,
2283  *     };
2284  *
2285  *    // First ioctl call to get num of mem regions and sizeof each attribute
2286  *    ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
2287  *
2288  *    // Allocate buffer for the memory region attributes
2289  *    void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
2290  *    void *ptr_start = ptr;
2291  *
2292  *    query.vector_of_mem_attr = (uintptr_t)ptr;
2293  *
2294  *    // Second ioctl call to actually fill the memory attributes
2295  *    ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
2296  *
2297  *    // Iterate over the returned memory region attributes
2298  *    for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
2299  *       struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
2300  *
2301  *       // Do something with attr
2302  *
2303  *       // Move pointer by one entry
2304  *       ptr += query.sizeof_mem_range_attr;
2305  *     }
2306  *
2307  *    free(ptr_start);
2308  */
2309 struct drm_xe_vm_query_mem_range_attr {
2310 	/** @extensions: Pointer to the first extension struct, if any */
2311 	__u64 extensions;
2312 
2313 	/** @vm_id: vm_id of the virtual range */
2314 	__u32 vm_id;
2315 
2316 	/** @num_mem_ranges: number of mem_ranges in range */
2317 	__u32 num_mem_ranges;
2318 
2319 	/** @start: start of the virtual address range */
2320 	__u64 start;
2321 
2322 	/** @range: size of the virtual address range */
2323 	__u64 range;
2324 
2325 	/** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
2326 	__u64 sizeof_mem_range_attr;
2327 
2328 	/** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
2329 	__u64 vector_of_mem_attr;
2330 
2331 	/** @reserved: Reserved */
2332 	__u64 reserved[2];
2333 
2334 };
2335 
2336 /**
2337  * struct drm_xe_exec_queue_set_property - exec queue set property
2338  *
2339  * Sets execution queue properties dynamically.
2340  * Currently only %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY
2341  * property can be dynamically set.
2342  */
2343 struct drm_xe_exec_queue_set_property {
2344 	/** @extensions: Pointer to the first extension struct, if any */
2345 	__u64 extensions;
2346 
2347 	/** @exec_queue_id: Exec queue ID */
2348 	__u32 exec_queue_id;
2349 
2350 	/** @property: property to set */
2351 	__u32 property;
2352 
2353 	/** @value: property value */
2354 	__u64 value;
2355 
2356 	/** @reserved: Reserved */
2357 	__u64 reserved[2];
2358 };
2359 
2360 #if defined(__cplusplus)
2361 }
2362 #endif
2363 
2364 #endif /* _UAPI_XE_DRM_H_ */
2365