/*
 * Copyright 2021 Intel Corporation. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * struct xe_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct xe_user_extension.
	 */
	__u32 name;
	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/*
 * xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_ENGINE_CREATE		0x06
#define DRM_XE_ENGINE_DESTROY		0x07
#define DRM_XE_EXEC			0x08
#define DRM_XE_MMIO			0x09
#define DRM_XE_ENGINE_SET_PROPERTY	0x0a
#define DRM_XE_WAIT_USER_FENCE		0x0b
#define DRM_XE_VM_MADVISE		0x0c
#define DRM_XE_ENGINE_GET_PROPERTY	0x0d

/* Must be kept compact -- no holes */
#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_ENGINE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_CREATE, struct drm_xe_engine_create)
#define DRM_IOCTL_XE_ENGINE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_GET_PROPERTY, struct drm_xe_engine_get_property)
#define DRM_IOCTL_XE_ENGINE_DESTROY		DRM_IOW( DRM_COMMAND_BASE + DRM_XE_ENGINE_DESTROY, struct drm_xe_engine_destroy)
#define DRM_IOCTL_XE_EXEC			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_MMIO			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MMIO, struct drm_xe_mmio)
#define DRM_IOCTL_XE_ENGINE_SET_PROPERTY	DRM_IOW( DRM_COMMAND_BASE + DRM_XE_ENGINE_SET_PROPERTY, struct drm_xe_engine_set_property)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_VM_MADVISE			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)

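/*
 * A minimal invocation sketch (not part of the uAPI itself); the render node
 * path is an assumption and the error handling is only illustrative:
 *
 * .. code-block:: C
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include "xe_drm.h"
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *	struct drm_xe_vm_create create;
 *
 *	memset(&create, 0, sizeof(create));
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		err(1, "DRM_IOCTL_XE_VM_CREATE");	// errno describes the failure
 *	// create.vm_id now identifies the new VM
 */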

struct drm_xe_engine_class_instance {
	__u16 engine_class;

#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
	/*
	 * Kernel only class (not actual hardware engine class). Used for
	 * creating ordered queues of VM bind operations.
	 */
#define DRM_XE_ENGINE_CLASS_VM_BIND		5

	__u16 engine_instance;
	__u16 gt_id;
};

#define XE_MEM_REGION_CLASS_SYSMEM	0
#define XE_MEM_REGION_CLASS_VRAM	1

struct drm_xe_query_mem_usage {
	__u32 num_regions;
	__u32 pad;

	struct drm_xe_query_mem_region {
		__u16 mem_class;
		__u16 instance;	/* unique ID even among different classes */
		__u32 pad;
		__u32 min_page_size;
		__u32 max_page_size;
		__u64 total_size;
		__u64 used;
		__u64 reserved[8];
	} regions[];
};
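
/*
 * A sketch of consuming the variable-length regions array above, assuming
 * the buffer was already filled in by DRM_IOCTL_XE_DEVICE_QUERY with
 * query == DRM_XE_DEVICE_QUERY_MEM_USAGE (see the sizing example further
 * below); the helper name is illustrative:
 *
 * .. code-block:: C
 *
 *	#include <stdio.h>
 *
 *	void print_regions(struct drm_xe_query_mem_usage *usage)
 *	{
 *		for (__u32 i = 0; i < usage->num_regions; i++) {
 *			struct drm_xe_query_mem_region *r = &usage->regions[i];
 *
 *			printf("region %u: class %u, %llu/%llu bytes used\n",
 *			       r->instance, r->mem_class,
 *			       (unsigned long long)r->used,
 *			       (unsigned long long)r->total_size);
 *		}
 *	}
 */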

struct drm_xe_query_config {
	__u32 num_params;
	__u32 pad;
#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
#define XE_QUERY_CONFIG_FLAGS			1
	#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM		(0x1 << 0)
	#define XE_QUERY_CONFIG_FLAGS_USE_GUC		(0x1 << 1)
#define XE_QUERY_CONFIG_MIN_ALIGNEMENT		2
#define XE_QUERY_CONFIG_VA_BITS			3
#define XE_QUERY_CONFIG_GT_COUNT		4
#define XE_QUERY_CONFIG_MEM_REGION_COUNT	5
#define XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY	6
#define XE_QUERY_CONFIG_NUM_PARAM		(XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY + 1)
	__u64 info[];
};
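
/*
 * A sketch of interpreting the info array, indexed by the XE_QUERY_CONFIG_*
 * values above; the buffer is assumed to have been filled in by
 * DRM_IOCTL_XE_DEVICE_QUERY with query == DRM_XE_DEVICE_QUERY_CONFIG:
 *
 * .. code-block:: C
 *
 *	int config_has_vram(struct drm_xe_query_config *config)
 *	{
 *		return !!(config->info[XE_QUERY_CONFIG_FLAGS] &
 *			  XE_QUERY_CONFIG_FLAGS_HAS_VRAM);
 *	}
 *
 *	__u64 config_va_bits(struct drm_xe_query_config *config)
 *	{
 *		return config->info[XE_QUERY_CONFIG_VA_BITS];
 *	}
 */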

struct drm_xe_query_gts {
	__u32 num_gt;
	__u32 pad;

	/*
	 * TODO: Perhaps info about every mem region relative to this GT? e.g.
	 * bandwidth between this GT and remote region?
	 */

	struct drm_xe_query_gt {
#define XE_QUERY_GT_TYPE_MAIN		0
#define XE_QUERY_GT_TYPE_REMOTE		1
#define XE_QUERY_GT_TYPE_MEDIA		2
		__u16 type;
		__u16 instance;
		__u32 clock_freq;
		__u64 features;
		__u64 native_mem_regions;	/* bit mask of instances from drm_xe_query_mem_usage */
		__u64 slow_mem_regions;		/* bit mask of instances from drm_xe_query_mem_usage */
		__u64 inaccessible_mem_regions;	/* bit mask of instances from drm_xe_query_mem_usage */
		__u64 reserved[8];
	} gts[];
};

struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

	/** @type: type of mask */
	__u16 type;
#define XE_TOPO_DSS_GEOMETRY	(1 << 0)
#define XE_TOPO_DSS_COMPUTE	(1 << 1)
#define XE_TOPO_EU_PER_DSS	(1 << 2)

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};
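
/*
 * A sketch of counting enabled geometry DSS from the mask above; the
 * popcount helper is a GCC/Clang builtin, and the buffer is assumed to come
 * from DRM_IOCTL_XE_DEVICE_QUERY with query == DRM_XE_DEVICE_QUERY_GT_TOPOLOGY:
 *
 * .. code-block:: C
 *
 *	unsigned int count_geometry_dss(struct drm_xe_query_topology_mask *topo)
 *	{
 *		unsigned int count = 0;
 *
 *		if (topo->type != XE_TOPO_DSS_GEOMETRY)
 *			return 0;
 *		for (__u32 i = 0; i < topo->num_bytes; i++)
 *			count += __builtin_popcount(topo->mask[i]);
 *		return count;
 *	}
 */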

struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @query: The type of data to query */
	__u32 query;

#define DRM_XE_DEVICE_QUERY_ENGINES	0
#define DRM_XE_DEVICE_QUERY_MEM_USAGE	1
#define DRM_XE_DEVICE_QUERY_CONFIG	2
#define DRM_XE_DEVICE_QUERY_GTS		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG	4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY	5

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
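
/*
 * A sketch of the query flow suggested by the @size and @data fields: the
 * two-pass convention (size == 0 and data == 0 on the first call) and the
 * helper name are assumptions here, not guarantees made by this header:
 *
 * .. code-block:: C
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	void *query_alloc(int fd, __u32 query_id)
 *	{
 *		struct drm_xe_device_query query;
 *		void *data;
 *
 *		memset(&query, 0, sizeof(query));
 *		query.query = query_id;
 *		// first pass: ask the kernel how large the result is
 *		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
 *			return NULL;
 *
 *		data = calloc(1, query.size);
 *		query.data = (__u64)(uintptr_t)data;
 *		// second pass: fetch the data into our buffer
 *		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
 *			free(data);
 *			return NULL;
 *		}
 *		return data;
 *	}
 */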

struct drm_xe_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags, currently a mask of memory instances of where the BO
	 * can be placed
	 */
#define XE_GEM_CREATE_FLAG_DEFER_BACKING	(0x1 << 24)
#define XE_GEM_CREATE_FLAG_SCANOUT		(0x1 << 25)
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *
	 *  2. Never be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
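
/*
 * A creation sketch: placing the BO in a single memory region by setting the
 * corresponding instance bit in @flags is an assumption based on the field
 * description above, and vram_instance / vm_id would come from earlier
 * queries and VM creation:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_create create;
 *
 *	memset(&create, 0, sizeof(create));
 *	create.size = 2 * 1024 * 1024;		// rounded up to page size
 *	create.flags = 1 << vram_instance;	// allow placement in that region
 *	create.vm_id = vm_id;			// optional: tie the BO to one VM
 *	if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create) == 0)
 *		bo_handle = create.handle;	// nonzero GEM handle
 */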

struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
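
/*
 * A sketch of the usual DRM mmap-offset dance using the struct above; this
 * mirrors how other DRM drivers are typically mapped and is illustrative
 * rather than a guarantee made by this header:
 *
 * .. code-block:: C
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_xe_gem_mmap_offset mmo;
 *	void *ptr = NULL;
 *
 *	memset(&mmo, 0, sizeof(mmo));
 *	mmo.handle = bo_handle;
 *	if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo) == 0)
 *		ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, mmo.offset);
 */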

/**
 * struct drm_xe_vm_bind_op_error_capture - format of VM bind op error capture
 */
struct drm_xe_vm_bind_op_error_capture {
	/** @error: errno that occurred */
	__s32 error;
	/** @op: operation that encountered an error */
	__u32 op;
	/** @addr: address of bind op */
	__u64 addr;
	/** @size: size of bind */
	__u64 size;
};

/** struct drm_xe_ext_vm_set_property - VM set property extension */
struct drm_xe_ext_vm_set_property {
	/** @base: base user extension */
	struct xe_user_extension base;

	/** @property: property to set */
#define XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS		0
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
#define XE_VM_EXTENSION_SET_PROPERTY	0
	__u64 extensions;

	/** @flags: Flags */
	__u32 flags;

#define DRM_XE_VM_CREATE_SCRATCH_PAGE	(0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE	(0x1 << 1)
#define DRM_XE_VM_CREATE_ASYNC_BIND_OPS	(0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE	(0x1 << 3)

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
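
/*
 * A sketch tying the extension chain to VM creation: async bind ops are
 * enabled and a drm_xe_vm_bind_op_error_capture buffer is advertised through
 * the XE_VM_EXTENSION_SET_PROPERTY extension; keeping the capture buffer in a
 * userspace global is purely illustrative:
 *
 * .. code-block:: C
 *
 *	static struct drm_xe_vm_bind_op_error_capture capture;
 *
 *	struct drm_xe_ext_vm_set_property ext = {
 *		.base.name = XE_VM_EXTENSION_SET_PROPERTY,
 *		.property = XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
 *		.value = (__u64)(uintptr_t)&capture,
 *	};
 *	struct drm_xe_vm_create create = {
 *		.extensions = (__u64)(uintptr_t)&ext,
 *		.flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create) == 0)
 *		vm_id = create.vm_id;
 */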

struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_bind_op {
	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/** @pad: MBZ */
	__u32 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;
		/** @userptr: user pointer to bind on */
		__u64 userptr;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

	/**
	 * @tile_mask: Mask for which tiles to create binds for, 0 == all tiles,
	 * only applies to creating new VMAs
	 */
	__u64 tile_mask;

	/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
	__u32 op;

	/** @region: Memory region to prefetch VMA to, an instance, not a mask */
	__u32 region;

#define XE_VM_BIND_OP_MAP		0x0
#define XE_VM_BIND_OP_UNMAP		0x1
#define XE_VM_BIND_OP_MAP_USERPTR	0x2
#define XE_VM_BIND_OP_RESTART		0x3
#define XE_VM_BIND_OP_UNMAP_ALL		0x4
#define XE_VM_BIND_OP_PREFETCH		0x5

#define XE_VM_BIND_FLAG_READONLY	(0x1 << 16)
	/*
	 * A bind op's completion is always asynchronous, hence the support for
	 * out syncs. This flag indicates that the allocation of memory for new
	 * page tables and the job that programs the page tables are also
	 * asynchronous relative to the IOCTL. That part of a bind operation can
	 * fail under memory pressure; the job in practice can't fail unless the
	 * system is totally shot.
	 *
	 * If this flag is clear and the IOCTL doesn't return an error, in
	 * practice the bind op is good and will complete.
	 *
	 * If this flag is set and the IOCTL doesn't return an error, the bind
	 * op can still fail and recovery is needed. If configured, the bind op
	 * that caused the error will be captured in
	 * drm_xe_vm_bind_op_error_capture. Once the user sees the error (via a
	 * ufence + XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS), it should
	 * free memory via non-async unbinds and then restart all queued async
	 * bind ops via XE_VM_BIND_OP_RESTART. Alternatively, the user can
	 * destroy the VM.
	 *
	 * This flag is only allowed when DRM_XE_VM_CREATE_ASYNC_BIND_OPS is
	 * configured in the VM and must be set if the VM is configured with
	 * DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state.
	 */
#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 17)
	/*
	 * Valid on a faulting VM only: do the MAP operation immediately rather
	 * than deferring the MAP to the page fault handler.
	 */
#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 18)

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @engine_id: engine_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and the engine must have the same vm_id. If zero, the default VM
	 * bind engine is used.
	 */
	__u32 engine_id;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	/** @pad: MBZ */
	__u32 pad;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;
		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @num_syncs: number of syncs to wait on */
	__u32 num_syncs;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
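
/*
 * A sketch of a single MAP on a VM created with DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
 * signalling a syncobj when the bind completes; the GPU virtual address and
 * the pairing of the out sync are illustrative choices:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind.obj = bo_handle,
 *		.bind.range = bo_size,
 *		.bind.addr = 0x1a0000,		// GPU virtual address
 *		.bind.op = XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC,
 *		.num_syncs = 1,
 *		.syncs = (__u64)(uintptr_t)&sync,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
 *		return;				// bind was rejected
 */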

/** struct drm_xe_ext_engine_set_property - engine set property extension */
struct drm_xe_ext_engine_set_property {
	/** @base: base user extension */
	struct xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;
};

/**
 * struct drm_xe_engine_set_property - engine set property
 *
 * Same namespace for extensions as drm_xe_engine_create
 */
struct drm_xe_engine_set_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @engine_id: Engine ID */
	__u32 engine_id;

	/** @property: property to set */
#define XE_ENGINE_SET_PROPERTY_PRIORITY			0
#define XE_ENGINE_SET_PROPERTY_TIMESLICE		1
#define XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
	/*
	 * Long running or ULLS engine mode. DMA fences are not allowed in this
	 * mode. Must match the value of DRM_XE_VM_CREATE_COMPUTE_MODE; this
	 * serves as a sanity check that the UMD knows what it is doing. Can
	 * only be set at engine create time.
	 */
#define XE_ENGINE_SET_PROPERTY_COMPUTE_MODE		3
#define XE_ENGINE_SET_PROPERTY_PERSISTENCE		4
#define XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT		5
#define XE_ENGINE_SET_PROPERTY_ACC_TRIGGER		6
#define XE_ENGINE_SET_PROPERTY_ACC_NOTIFY		7
#define XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY		8
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_engine_create {
	/** @extensions: Pointer to the first extension struct, if any */
#define XE_ENGINE_EXTENSION_SET_PROPERTY               0
	__u64 extensions;

	/** @width: submission width (number of BBs per exec) for this engine */
	__u16 width;

	/** @num_placements: number of valid placements for this engine */
	__u16 num_placements;

	/** @vm_id: VM to use for this engine */
	__u32 vm_id;

	/** @flags: MBZ */
	__u32 flags;

	/** @engine_id: Returned engine ID */
	__u32 engine_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
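
/*
 * A sketch of creating a single-width engine with one placement on the
 * render class; gt 0 / instance 0 are illustrative choices:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_engine_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (__u64)(uintptr_t)&instance,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create) == 0)
 *		engine_id = create.engine_id;
 */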

struct drm_xe_engine_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @engine_id: Engine ID */
	__u32 engine_id;

	/** @property: property to get */
#define XE_ENGINE_GET_PROPERTY_BAN			0
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_engine_destroy {
	/** @engine_id: Engine ID */
	__u32 engine_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	__u32 flags;

#define DRM_XE_SYNC_SYNCOBJ		0x0
#define DRM_XE_SYNC_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_DMA_BUF		0x2
#define DRM_XE_SYNC_USER_FENCE		0x3
#define DRM_XE_SYNC_SIGNAL		0x10

	/** @pad: MBZ */
	__u32 pad;

	union {
		__u32 handle;
		/**
		 * @addr: Address of the user fence. When the sync is passed in
		 * via the exec IOCTL this is a GPU address in the VM. When
		 * passed in via the VM bind IOCTL this is a user pointer. In
		 * either case, it is the user's responsibility that this
		 * address is present and mapped when the user fence is
		 * signalled. Must be qword aligned.
		 */
		__u64 addr;
	};

	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @engine_id: Engine ID for the batch buffer */
	__u32 engine_id;

	/** @num_syncs: Number of struct drm_xe_sync entries in the array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};
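
/*
 * A submission sketch for a width-1 engine: wait on the syncobj signalled by
 * the earlier bind example and pass a single batch buffer GPU address;
 * batch_addr is assumed to already be bound in the VM:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_SYNCOBJ,	// wait, not signal
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.engine_id = engine_id,
 *		.num_syncs = 1,
 *		.syncs = (__u64)(uintptr_t)&sync,
 *		.address = batch_addr,
 *		.num_batch_buffer = 1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_EXEC, &exec))
 *		return;				// submission failed
 */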

struct drm_xe_mmio {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	__u32 addr;

	__u32 flags;

#define DRM_XE_MMIO_8BIT	0x0
#define DRM_XE_MMIO_16BIT	0x1
#define DRM_XE_MMIO_32BIT	0x2
#define DRM_XE_MMIO_64BIT	0x3
#define DRM_XE_MMIO_BITS_MASK	0x3
#define DRM_XE_MMIO_READ	0x4
#define DRM_XE_MMIO_WRITE	0x8

	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - wait user fence
 *
 * Wait on a user fence. XE will wake up on every HW engine interrupt in the
 * instances list and check if the user fence is complete:
 * (*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	union {
		/**
		 * @addr: user pointer address to wait on, must be qword aligned
		 */
		__u64 addr;
		/**
		 * @vm_id: The ID of the VM which encountered an error, used
		 * with DRM_XE_UFENCE_WAIT_VM_ERROR. Upper 32 bits must be
		 * clear.
		 */
		__u64 vm_id;
	};
	/** @op: wait operation (type of comparison) */
#define DRM_XE_UFENCE_WAIT_EQ	0
#define DRM_XE_UFENCE_WAIT_NEQ	1
#define DRM_XE_UFENCE_WAIT_GT	2
#define DRM_XE_UFENCE_WAIT_GTE	3
#define DRM_XE_UFENCE_WAIT_LT	4
#define DRM_XE_UFENCE_WAIT_LTE	5
	__u16 op;
	/** @flags: wait flags */
#define DRM_XE_UFENCE_WAIT_SOFT_OP	(1 << 0)	/* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME	(1 << 1)
#define DRM_XE_UFENCE_WAIT_VM_ERROR	(1 << 2)
	__u16 flags;
	/** @pad: MBZ */
	__u32 pad;
	/** @value: compare value */
	__u64 value;
	/** @mask: comparison mask */
#define DRM_XE_UFENCE_WAIT_U8		0xffu
#define DRM_XE_UFENCE_WAIT_U16		0xffffu
#define DRM_XE_UFENCE_WAIT_U32		0xffffffffu
#define DRM_XE_UFENCE_WAIT_U64		0xffffffffffffffffu
	__u64 mask;
	/** @timeout: how long to wait before bailing, value in jiffies */
	__s64 timeout;
	/**
	 * @num_engines: number of engine instances to wait on, must be zero
	 * when DRM_XE_UFENCE_WAIT_SOFT_OP is set
	 */
	__u64 num_engines;
	/**
	 * @instances: user pointer to array of drm_xe_engine_class_instance to
	 * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP is set
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
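
/*
 * A waiting sketch paired with a DRM_XE_SYNC_USER_FENCE out sync on a VM
 * bind: block until the qword at fence_value equals EXPECTED_VALUE. The
 * fence_value variable, EXPECTED_VALUE, the timeout, and the pairing itself
 * are all illustrative assumptions:
 *
 * .. code-block:: C
 *
 *	static __u64 fence_value;
 *
 *	struct drm_xe_wait_user_fence wait = {
 *		.addr = (__u64)(uintptr_t)&fence_value,
 *		.op = DRM_XE_UFENCE_WAIT_EQ,
 *		.flags = DRM_XE_UFENCE_WAIT_SOFT_OP,	// e.g. waiting on a VM bind
 *		.value = EXPECTED_VALUE,
 *		.mask = DRM_XE_UFENCE_WAIT_U64,
 *		.timeout = 1000,			// jiffies
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait))
 *		return;					// timed out or failed
 */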

struct drm_xe_vm_madvise {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM in which the VMA exists */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @range: Number of bytes in the VMA */
	__u64 range;

	/** @addr: Address of the VMA to operate on */
	__u64 addr;

	/*
	 * Setting the preferred location will trigger a migration of the VMA
	 * backing store to the new location if the backing store is already
	 * allocated.
	 */
#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS	0
#define DRM_XE_VM_MADVISE_PREFERRED_GT		1
	/*
	 * In this case the lower 32 bits are the mem class and the upper 32
	 * are the GT. The combination provides a single IOCTL that both sets
	 * the preference and migrates the VMA to the preferred location.
	 */
#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS_GT	2
	/*
	 * The CPU will do atomic memory operations to this VMA. Must be set on
	 * some devices for atomics to behave correctly.
	 */
#define DRM_XE_VM_MADVISE_CPU_ATOMIC		3
	/*
	 * The device will do atomic memory operations to this VMA. Must be set
	 * on some devices for atomics to behave correctly.
	 */
#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC		4
	/*
	 * Priority WRT eviction (moving from the preferred memory location due
	 * to memory pressure). The lower the priority, the more likely the VMA
	 * is to be evicted.
	 */
#define DRM_XE_VM_MADVISE_PRIORITY		5
#define		DRM_XE_VMA_PRIORITY_LOW		0
#define		DRM_XE_VMA_PRIORITY_NORMAL	1	/* Default */
#define		DRM_XE_VMA_PRIORITY_HIGH	2	/* Must be elevated user */
	/* Pin the VMA in memory, must be elevated user */
#define DRM_XE_VM_MADVISE_PIN			6

	/** @property: property to set */
	__u32 property;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};
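
/*
 * A sketch of steering an existing VMA toward VRAM; addr/range must describe
 * a VMA already bound in the VM, and using the memory class as the property
 * value is an illustrative reading of the defines above:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_madvise madvise = {
 *		.vm_id = vm_id,
 *		.addr = 0x1a0000,
 *		.range = bo_size,
 *		.property = DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS,
 *		.value = XE_MEM_REGION_CLASS_VRAM,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_MADVISE, &madvise))
 *		return;				// advice was rejected
 */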

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */