/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevent generated by xe on its PCI node
 *
 * XE_RESET_FAILED_UEVENT - Generated when an attempt to reset a GT
 * fails. The value supplied with the event is always "NEEDS_RESET".
 * Additional information supplied is the tile id and GT id of the GT
 * unit for which the reset has failed.
 */
#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"

/**
 * struct xe_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e.
 * ext1), which would then apply all of the above extensions.
 *
 */
struct xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct xe_user_extension.
	 */
	__u32 name;

	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

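/*
 * As a minimal sketch of feeding a chain head to an ioctl, assuming an exec
 * queue priority property is being set via the extension structs defined
 * further below in this header:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property prio = {
 *		.base.next_extension = 0,
 *		.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 1,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&prio,
 *		// remaining fields filled in as usual
 *	};
 */
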
/*
 * xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_EXEC_QUEUE_CREATE	0x06
#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
#define DRM_XE_EXEC			0x08
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY	0x09
#define DRM_XE_WAIT_USER_FENCE		0x0a
#define DRM_XE_VM_MADVISE		0x0b
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x0c

/* Must be kept compact -- no holes */
#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY	DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_VM_MADVISE			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)

/** struct drm_xe_engine_class_instance - instance of an engine class */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
	/*
	 * Kernel-only classes (not actual hardware engine classes). Used for
	 * creating ordered queues of VM bind operations.
	 */
#define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC	5
#define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC	6
	__u16 engine_class;

	__u16 engine_instance;
	__u16 gt_id;
	__u16 rsvd;
};

/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
	/** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
	XE_MEM_REGION_CLASS_SYSMEM = 0,
	/**
	 * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
	XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_query_mem_region - Describes some region as known to
 * the driver.
 */
struct drm_xe_query_mem_region {
	/**
	 * @mem_class: The memory class describing this region.
	 *
	 * See enum drm_xe_memory_class for supported values.
	 */
	__u16 mem_class;
	/**
	 * @instance: The instance for this region.
	 *
	 * The @mem_class and @instance taken together will always give
	 * a unique pair.
	 */
	__u16 instance;
	/** @pad: MBZ */
	__u32 pad;
	/**
	 * @min_page_size: Min page-size in bytes for this region.
	 *
	 * When the kernel allocates memory for this region, the
	 * underlying pages will be at least @min_page_size in size.
	 *
	 * Important note: When userspace allocates a GTT address which
	 * can point to memory allocated from this region, it must also
	 * respect this minimum alignment. This is enforced by the
	 * kernel.
	 */
	__u32 min_page_size;
	/**
	 * @total_size: The usable size in bytes for this region.
	 */
	__u64 total_size;
	/**
	 * @used: Estimate of the memory used in bytes for this region.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero.
	 */
	__u64 used;
	/**
	 * @cpu_visible_size: How much of this region can be CPU
	 * accessed, in bytes.
	 *
	 * This will always be <= @total_size, and the remainder (if
	 * any) will not be CPU accessible. If the CPU accessible part
	 * is smaller than @total_size then this is referred to as a
	 * small BAR system.
	 *
	 * On systems without small BAR (full BAR), @cpu_visible_size
	 * will always equal @total_size, since all of it will be CPU
	 * accessible.
	 *
	 * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
	__u64 cpu_visible_size;
	/**
	 * @cpu_visible_used: Estimate of CPU visible memory used, in
	 * bytes.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero. Note this is only currently tracked for
	 * XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
	/** @reserved: MBZ */
	__u64 reserved[6];
};

/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
 * .data points to this allocated structure.
 *
 * The query returns the engine cycles and the frequency that can
 * be used to calculate the engine timestamp. In addition the
 * query returns a set of CPU timestamps that indicate when the command
 * streamer cycle count was captured.
 */
struct drm_xe_query_engine_cycles {
	/**
	 * @eci: This is input by the user and is the engine for which command
	 * streamer cycles are queried.
	 */
	struct drm_xe_engine_class_instance eci;

	/**
	 * @clockid: This is input by the user and is the reference clock id for
	 * CPU timestamp. For definition, see clock_gettime(2) and
	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
	 */
	__s32 clockid;

	/** @width: Width of the engine cycle counter in bits. */
	__u32 width;

	/**
	 * @engine_cycles: Engine cycles as read from its register
	 * at 0x358 offset.
	 */
	__u64 engine_cycles;

	/** @engine_frequency: Frequency of the engine cycles in Hz. */
	__u64 engine_frequency;

	/**
	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
	 * reading the engine_cycles register using the reference clockid set by the
	 * user.
	 */
	__u64 cpu_timestamp;

	/**
	 * @cpu_delta: Time delta in ns captured around reading the lower dword
	 * of the engine_cycles register.
	 */
	__u64 cpu_delta;
};

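/*
 * A minimal sketch of this query, assuming a render engine on GT 0 and that a
 * single ioctl call with @size pre-set is sufficient since the structure is
 * user-allocated, as described above:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_engine_cycles cycles = {
 *		.eci = {
 *			.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *			.gt_id = 0,
 *		},
 *		.clockid = CLOCK_MONOTONIC,
 *	};
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
 *		.size = sizeof(cycles),
 *		.data = (uintptr_t)&cycles,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 */
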
/**
 * struct drm_xe_query_mem_usage - describe memory regions and usage
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_USAGE, then the reply uses
 * struct drm_xe_query_mem_usage in .data.
 */
struct drm_xe_query_mem_usage {
	/** @num_regions: number of memory regions returned in @regions */
	__u32 num_regions;
	/** @pad: MBZ */
	__u32 pad;
	/** @regions: The returned regions for this device */
	struct drm_xe_query_mem_region regions[];
};

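/*
 * A minimal sketch of the usual two-call query pattern (see struct
 * drm_xe_device_query below) applied to this query:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_mem_usage *usage;
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_MEM_USAGE,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); // fills query.size
 *	usage = malloc(query.size);
 *	query.data = (uintptr_t)usage;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	for (int i = 0; i < usage->num_regions; i++)
 *		printf("class %u instance %u: %llu bytes total\n",
 *		       usage->regions[i].mem_class,
 *		       usage->regions[i].instance,
 *		       (unsigned long long)usage->regions[i].total_size);
 *	free(usage);
 */
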
/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 */
struct drm_xe_query_config {
	/** @num_params: number of parameters returned in info */
	__u32 num_params;

	/** @pad: MBZ */
	__u32 pad;

#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
#define XE_QUERY_CONFIG_FLAGS			1
	#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM		(0x1 << 0)
#define XE_QUERY_CONFIG_MIN_ALIGNMENT		2
#define XE_QUERY_CONFIG_VA_BITS			3
#define XE_QUERY_CONFIG_GT_COUNT		4
#define XE_QUERY_CONFIG_MEM_REGION_COUNT	5
#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	6
#define XE_QUERY_CONFIG_NUM_PARAM		(XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1)
	/** @info: array of elements containing the config info */
	__u64 info[];
};

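/*
 * A minimal sketch of reading individual parameters out of the @info array,
 * using the same two-call pattern as the other queries:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_config *config;
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_CONFIG,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	config = malloc(query.size);
 *	query.data = (uintptr_t)config;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	printf("va bits: %llu, has VRAM: %d\n",
 *	       (unsigned long long)config->info[XE_QUERY_CONFIG_VA_BITS],
 *	       !!(config->info[XE_QUERY_CONFIG_FLAGS] &
 *		  XE_QUERY_CONFIG_FLAGS_HAS_VRAM));
 *	free(config);
 */
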
/**
 * struct drm_xe_query_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which will return a list with all the
 * existing GT individual descriptions.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 */
struct drm_xe_query_gt {
#define XE_QUERY_GT_TYPE_MAIN		0
#define XE_QUERY_GT_TYPE_REMOTE		1
#define XE_QUERY_GT_TYPE_MEDIA		2
	/** @type: GT type: Main, Remote, or Media */
	__u16 type;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @clock_freq: A clock frequency for timestamp */
	__u32 clock_freq;
	/**
	 * @native_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_usage that live on the same GPU/Tile and have
	 * direct access.
	 */
	__u64 native_mem_regions;
	/**
	 * @slow_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_usage that this GT can indirectly access, although
	 * they live on a different GPU/Tile.
	 */
	__u64 slow_mem_regions;
	/**
	 * @inaccessible_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_usage that are not accessible by this GT at all.
	 */
	__u64 inaccessible_mem_regions;
	/** @reserved: Reserved */
	__u64 reserved[8];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
	/** @num_gt: number of GT items returned in gt_list */
	__u32 num_gt;
	/** @pad: MBZ */
	__u32 pad;
	/** @gt_list: The GT list returned for this device */
	struct drm_xe_query_gt gt_list[];
};

/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 */
struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

	/*
	 * To query the mask of Dual Sub Slices (DSS) available for geometry
	 * operations. For example a query response containing the following
	 * in mask:
	 *   DSS_GEOMETRY    ff ff ff ff 00 00 00 00
	 * means 32 DSS are available for geometry.
	 */
#define XE_TOPO_DSS_GEOMETRY	(1 << 0)
	/*
	 * To query the mask of Dual Sub Slices (DSS) available for compute
	 * operations. For example a query response containing the following
	 * in mask:
	 *   DSS_COMPUTE    ff ff ff ff 00 00 00 00
	 * means 32 DSS are available for compute.
	 */
#define XE_TOPO_DSS_COMPUTE	(1 << 1)
	/*
	 * To query the mask of Execution Units (EU) available per Dual Sub
	 * Slices (DSS). For example a query response containing the following
	 * in mask:
	 *   EU_PER_DSS    ff ff 00 00 00 00 00 00
	 * means each DSS has 16 EU.
	 */
#define XE_TOPO_EU_PER_DSS	(1 << 2)
	/** @type: type of mask */
	__u16 type;

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};

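/*
 * A minimal sketch of counting the DSS available for geometry from one
 * returned entry, assuming *topo points at an already-queried struct
 * drm_xe_query_topology_mask with @type == XE_TOPO_DSS_GEOMETRY:
 *
 * .. code-block:: C
 *
 *	int dss = 0;
 *	for (int i = 0; i < topo->num_bytes; i++)
 *		dss += __builtin_popcount(topo->mask[i]);
 *	printf("GT %u: %d DSS for geometry\n", topo->gt_id, dss);
 */
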
/**
 * struct drm_xe_device_query - main structure to query device information
 *
 * If size is set to 0, the driver fills it with the required size for the
 * requested type of data to query. If size is equal to the required size,
 * the queried information is copied into data.
 *
 * For example the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance *hwe;
 *	struct drm_xe_device_query query = {
 *		.extensions = 0,
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *		.size = 0,
 *		.data = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	hwe = malloc(query.size);
 *	query.data = (uintptr_t)hwe;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	int num_engines = query.size / sizeof(*hwe);
 *	for (int i = 0; i < num_engines; i++) {
 *		printf("Engine %d: %s\n", i,
 *			hwe[i].engine_class == DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
 *			hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COPY ? "COPY":
 *			hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
 *			hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
 *			hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
 *			"UNKNOWN");
 *	}
 *	free(hwe);
 */
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES		0
#define DRM_XE_DEVICE_QUERY_MEM_USAGE		1
#define DRM_XE_DEVICE_QUERY_CONFIG		2
#define DRM_XE_DEVICE_QUERY_GT_LIST		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG		4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
	/** @query: The type of data to query */
	__u32 query;

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

#define XE_GEM_CREATE_FLAG_DEFER_BACKING	(0x1 << 24)
#define XE_GEM_CREATE_FLAG_SCANOUT		(0x1 << 25)
/*
 * When using VRAM as a possible placement, ensure that the corresponding VRAM
 * allocation will always use the CPU accessible part of VRAM. This is important
 * for small-bar systems (on full-bar systems this gets turned into a noop).
 *
 * Note: System memory can be used as an extra placement so that the kernel can
 * spill the allocation to system memory if space can't be made available in
 * the CPU accessible part of VRAM (giving the same behaviour as the i915
 * interface, see I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *
 * Note: For clear-color CCS surfaces the kernel needs to read the clear-color
 * value stored in the buffer, and on discrete platforms we need to use VRAM for
 * display surfaces, therefore the kernel requires setting this flag for such
 * objects, otherwise an error is thrown on small-bar systems.
 */
#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(0x1 << 26)
	/**
	 * @flags: Flags, currently a mask of memory instances of where BO can
	 * be placed
	 */
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *  2. Cannot be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

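/*
 * A minimal sketch of creating a BO and CPU-mapping it via the fake offset.
 * The placement encoding in @flags is assumed here to be (1 << instance), with
 * the instance taken from DRM_XE_DEVICE_QUERY_MEM_USAGE; sysmem_instance is a
 * hypothetical variable holding a system memory instance:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_create create = {
 *		.size = 0x1000,
 *		.flags = 1 << sysmem_instance, // assumed placement encoding
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *
 *	struct drm_xe_gem_mmap_offset mmo = {
 *		.handle = create.handle,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mmo.offset);
 */
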
/** struct drm_xe_ext_set_property - XE set property extension */
struct drm_xe_ext_set_property {
	/** @base: base user extension */
	struct xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_create {
#define XE_VM_EXTENSION_SET_PROPERTY	0
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_VM_CREATE_SCRATCH_PAGE	(0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE	(0x1 << 1)
#define DRM_XE_VM_CREATE_ASYNC_DEFAULT	(0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE	(0x1 << 3)
	/** @flags: Flags */
	__u32 flags;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

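/*
 * A minimal sketch of VM lifetime, assuming no special creation flags are
 * needed:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *
 *	// ... bind BOs and submit work against create.vm_id ...
 *
 *	struct drm_xe_vm_destroy destroy = {
 *		.vm_id = create.vm_id,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
 */
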
struct drm_xe_vm_bind_op {
	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/** @pad: MBZ */
	__u32 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;

		/** @userptr: user pointer to bind on */
		__u64 userptr;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

	/**
	 * @tile_mask: Mask for which tiles to create binds for, 0 == All tiles,
	 * only applies to creating new VMAs
	 */
	__u64 tile_mask;

#define XE_VM_BIND_OP_MAP		0x0
#define XE_VM_BIND_OP_UNMAP		0x1
#define XE_VM_BIND_OP_MAP_USERPTR	0x2
#define XE_VM_BIND_OP_UNMAP_ALL		0x3
#define XE_VM_BIND_OP_PREFETCH		0x4
	/** @op: Bind operation to perform */
	__u32 op;

#define XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 1)
	/*
	 * Valid on a faulting VM only, do the MAP operation immediately rather
	 * than deferring the MAP to the page fault handler.
	 */
#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
	/*
	 * When the NULL flag is set, the page tables are set up with a special
	 * bit which indicates writes are dropped and all reads return zero. In
	 * the future, the NULL flag will only be valid for XE_VM_BIND_OP_MAP
	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
	 * intended to implement VK sparse bindings.
	 */
#define XE_VM_BIND_FLAG_NULL		(0x1 << 3)
	/** @flags: Bind flags */
	__u32 flags;

	/** @region: Memory region to prefetch VMA to, instance not a mask */
	__u32 region;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @exec_queue_id: exec_queue_id, must be of class
	 * DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC or DRM_XE_ENGINE_CLASS_VM_BIND_SYNC,
	 * and the exec queue must have the same vm_id. If zero, the default VM
	 * bind engine is used.
	 */
	__u32 exec_queue_id;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	/** @pad: MBZ */
	__u32 pad;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;

		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @num_syncs: amount of syncs to wait on */
	__u32 num_syncs;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

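/*
 * A minimal sketch of mapping a BO into a VM with a single bind, signalling a
 * syncobj on completion. vm_id, bo_handle, bo_size and syncobj_handle are
 * assumed to have been created earlier; whether XE_VM_BIND_FLAG_ASYNC must be
 * set depends on how the VM was created:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.range = bo_size,
 *			.addr = 0x1a0000,
 *			.op = XE_VM_BIND_OP_MAP,
 *		},
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
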
/**
 * struct drm_xe_exec_queue_set_property - exec queue set property
 *
 * Same namespace for extensions as drm_xe_exec_queue_create
 */
struct drm_xe_exec_queue_set_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY	7
	/** @property: property to set */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_exec_queue_create {
#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @width: submission width (number of batch buffers per exec) for this exec queue */
	__u16 width;

	/** @num_placements: number of valid placements for this exec queue */
	__u16 num_placements;

	/** @vm_id: VM to use for this exec queue */
	__u32 vm_id;

	/** @flags: MBZ */
	__u32 flags;

	/** @exec_queue_id: Returned exec queue ID */
	__u32 exec_queue_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

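/*
 * A minimal sketch of creating a single-width exec queue with one placement on
 * the first render engine, assuming vm_id was returned by
 * DRM_IOCTL_XE_VM_CREATE:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 */
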
struct drm_xe_exec_queue_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define XE_EXEC_QUEUE_GET_PROPERTY_BAN			0
	/** @property: property to get */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_exec_queue_destroy {
	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_SYNC_SYNCOBJ		0x0
#define DRM_XE_SYNC_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_DMA_BUF		0x2
#define DRM_XE_SYNC_USER_FENCE		0x3
#define DRM_XE_SYNC_SIGNAL		0x10
	/** @flags: One of the DRM_XE_SYNC_* types, optionally OR'd with DRM_XE_SYNC_SIGNAL */
	__u32 flags;

	/** @pad: MBZ */
	__u32 pad;

	union {
		/** @handle: Handle for the sync object */
		__u32 handle;

		/**
		 * @addr: Address of user fence. When a sync is passed in via
		 * the exec IOCTL this is a GPU address in the VM. When a sync
		 * is passed in via the VM bind IOCTL this is a user pointer.
		 * In either case, it is the user's responsibility that this
		 * address is present and mapped when the user fence is
		 * signalled. Must be qword aligned.
		 */
		__u64 addr;
	};

	/** @timeline_value: Point on the timeline, for DRM_XE_SYNC_TIMELINE_SYNCOBJ */
	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID for the batch buffer */
	__u32 exec_queue_id;

	/** @num_syncs: Amount of struct drm_xe_sync in array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or a
	 * pointer to an array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

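/*
 * A minimal sketch of submitting a single batch buffer and signalling a
 * syncobj on completion. exec_queue_id, batch_addr (a GPU address already
 * bound in the VM) and syncobj_handle are assumed:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = exec_queue_id,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *		.address = batch_addr,
 *		.num_batch_buffer = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */
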
/**
 * struct drm_xe_wait_user_fence - wait user fence
 *
 * Wait on a user fence. XE will wake up on every HW engine interrupt in the
 * instances list and check if the user fence is complete::
 *
 *	(*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to the user on user fence completion or timeout.
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @addr: user pointer address to wait on, must be qword aligned
	 */
	__u64 addr;

#define DRM_XE_UFENCE_WAIT_EQ	0
#define DRM_XE_UFENCE_WAIT_NEQ	1
#define DRM_XE_UFENCE_WAIT_GT	2
#define DRM_XE_UFENCE_WAIT_GTE	3
#define DRM_XE_UFENCE_WAIT_LT	4
#define DRM_XE_UFENCE_WAIT_LTE	5
	/** @op: wait operation (type of comparison) */
	__u16 op;

#define DRM_XE_UFENCE_WAIT_SOFT_OP	(1 << 0)	/* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME	(1 << 1)
	/** @flags: wait flags */
	__u16 flags;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: compare value */
	__u64 value;

#define DRM_XE_UFENCE_WAIT_U8		0xffu
#define DRM_XE_UFENCE_WAIT_U16		0xffffu
#define DRM_XE_UFENCE_WAIT_U32		0xffffffffu
#define DRM_XE_UFENCE_WAIT_U64		0xffffffffffffffffu
	/** @mask: comparison mask */
	__u64 mask;
	/**
	 * @timeout: how long to wait before bailing, value in nanoseconds.
	 * Without the DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
	 * it contains the timeout expressed in nanoseconds to wait (the fence
	 * will expire at now() + timeout).
	 * When the DRM_XE_UFENCE_WAIT_ABSTIME flag is set (absolute timeout)
	 * the wait will end at timeout (uses the system MONOTONIC_CLOCK).
	 * Passing a negative timeout leads to a never-ending wait.
	 *
	 * On a relative timeout this value is updated with the timeout left
	 * (for restarting the call in case of signal delivery).
	 * On an absolute timeout this value stays intact (a restarted call
	 * still expires at the same point in time).
	 */
	__s64 timeout;

	/**
	 * @num_engines: number of engine instances to wait on, must be zero
	 * when DRM_XE_UFENCE_WAIT_SOFT_OP set
	 */
	__u64 num_engines;

	/**
	 * @instances: user pointer to array of drm_xe_engine_class_instance to
	 * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

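/*
 * A minimal sketch of waiting for a 64-bit user fence in process memory to
 * become equal to 1, using the soft-op mode so no engine list is needed:
 *
 * .. code-block:: C
 *
 *	__u64 fence_value = 0; // qword aligned, written when the fence signals
 *	struct drm_xe_wait_user_fence wait = {
 *		.addr = (uintptr_t)&fence_value,
 *		.op = DRM_XE_UFENCE_WAIT_EQ,
 *		.flags = DRM_XE_UFENCE_WAIT_SOFT_OP,
 *		.value = 1,
 *		.mask = DRM_XE_UFENCE_WAIT_U64,
 *		.timeout = -1, // never-ending wait
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */
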
struct drm_xe_vm_madvise {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM in which the VMA exists */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @range: Number of bytes in the VMA */
	__u64 range;

	/** @addr: Address of the VMA to operate on */
	__u64 addr;

	/*
	 * Setting the preferred location will trigger a migrate of the VMA
	 * backing store to the new location if the backing store is already
	 * allocated.
	 *
	 * For DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS usage, see enum
	 * drm_xe_memory_class.
	 */
#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS	0
#define DRM_XE_VM_MADVISE_PREFERRED_GT		1
	/*
	 * In this case the lower 32 bits are the mem class, the upper 32 the
	 * GT. The combination provides a single IOCTL to set both and migrate
	 * the VMA to the preferred location.
	 */
#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS_GT	2
	/*
	 * The CPU will do atomic memory operations to this VMA. Must be set on
	 * some devices for atomics to behave correctly.
	 */
#define DRM_XE_VM_MADVISE_CPU_ATOMIC		3
	/*
	 * The device will do atomic memory operations to this VMA. Must be set
	 * on some devices for atomics to behave correctly.
	 */
#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC		4
	/*
	 * Priority WRT eviction (moving from the preferred memory location due
	 * to memory pressure). The lower the priority, the more likely to be
	 * evicted.
	 */
#define DRM_XE_VM_MADVISE_PRIORITY		5
#define		DRM_XE_VMA_PRIORITY_LOW		0
		/* Default */
#define		DRM_XE_VMA_PRIORITY_NORMAL	1
		/* Requires a user with elevated privileges */
#define		DRM_XE_VMA_PRIORITY_HIGH	2
	/* Pin the VMA in memory, requires a user with elevated privileges */
#define DRM_XE_VM_MADVISE_PIN			6
	/** @property: property to set */
	__u32 property;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

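/*
 * A minimal sketch of setting the preferred memory class of a VMA to VRAM.
 * vm_id, vma_addr and vma_size are assumed to describe an existing VMA:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_madvise madv = {
 *		.vm_id = vm_id,
 *		.addr = vma_addr,
 *		.range = vma_size,
 *		.property = DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS,
 *		.value = XE_MEM_REGION_CLASS_VRAM,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_MADVISE, &madv);
 */
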
/**
 * DOC: XE PMU event config IDs
 *
 * Check 'man perf_event_open' to use the IDs XE_PMU_XXXX listed in xe_drm.h
 * in 'struct perf_event_attr' as part of the perf_event_open syscall to read
 * a particular event.
 *
 * For example to open the XE_PMU_INTERRUPTS(0):
 *
 * .. code-block:: C
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int cpu = 0;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(struct perf_event_attr));
 *	attr.type = type; // e.g. /sys/bus/event_source/devices/xe_0000_56_00.0/type
 *	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
 *	attr.use_clockid = 1;
 *	attr.clockid = CLOCK_MONOTONIC;
 *	attr.config = XE_PMU_INTERRUPTS(0);
 *
 *	fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 */

/*
 * Top bits of every counter are GT id.
 */
#define __XE_PMU_GT_SHIFT (56)

#define ___XE_PMU_OTHER(gt, x) \
	(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))

#define XE_PMU_INTERRUPTS(gt)			___XE_PMU_OTHER(gt, 0)
#define XE_PMU_RENDER_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 1)
#define XE_PMU_COPY_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 2)
#define XE_PMU_MEDIA_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 3)
#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___XE_PMU_OTHER(gt, 4)

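/*
 * Continuing the perf example above: with PERF_FORMAT_TOTAL_TIME_ENABLED the
 * kernel returns two u64 values on read(2), the counter value followed by the
 * time the event has been enabled (a minimal sketch):
 *
 * .. code-block:: C
 *
 *	__u64 buf[2];
 *
 *	read(fd, buf, sizeof(buf));
 *	printf("interrupts: %llu (enabled for %llu ns)\n",
 *	       (unsigned long long)buf[0], (unsigned long long)buf[1]);
 */
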
#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */