xref: /linux/include/uapi/drm/xe_drm.h (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
/*
 * Copyright 2021 Intel Corporation. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * struct xe_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct xe_user_extension.
	 */
	__u32 name;
	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/*
 * Xe specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be in [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_ENGINE_CREATE		0x06
#define DRM_XE_ENGINE_DESTROY		0x07
#define DRM_XE_EXEC			0x08
#define DRM_XE_MMIO			0x09
#define DRM_XE_ENGINE_SET_PROPERTY	0x0a
#define DRM_XE_WAIT_USER_FENCE		0x0b
#define DRM_XE_VM_MADVISE		0x0c

/* Must be kept compact -- no holes */
#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_ENGINE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_CREATE, struct drm_xe_engine_create)
#define DRM_IOCTL_XE_ENGINE_DESTROY		DRM_IOW( DRM_COMMAND_BASE + DRM_XE_ENGINE_DESTROY, struct drm_xe_engine_destroy)
#define DRM_IOCTL_XE_EXEC			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_MMIO			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MMIO, struct drm_xe_mmio)
#define DRM_IOCTL_XE_ENGINE_SET_PROPERTY	DRM_IOW( DRM_COMMAND_BASE + DRM_XE_ENGINE_SET_PROPERTY, struct drm_xe_engine_set_property)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_VM_MADVISE			DRM_IOW( DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)

struct drm_xe_engine_class_instance {
	__u16 engine_class;

#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
	/*
	 * Kernel only class (not actual hardware engine class). Used for
	 * creating ordered queues of VM bind operations.
	 */
#define DRM_XE_ENGINE_CLASS_VM_BIND		5

	__u16 engine_instance;
	__u16 gt_id;
};

#define XE_MEM_REGION_CLASS_SYSMEM	0
#define XE_MEM_REGION_CLASS_VRAM	1

struct drm_xe_query_mem_usage {
	__u32 num_regions;
	__u32 pad;

	struct drm_xe_query_mem_region {
		__u16 mem_class;
		__u16 instance;	/* unique ID even among different classes */
		__u32 pad;
		__u32 min_page_size;
		__u32 max_page_size;
		__u64 total_size;
		__u64 used;
		__u64 reserved[8];
	} regions[];
};

struct drm_xe_query_config {
	__u32 num_params;
	__u32 pad;
#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
#define XE_QUERY_CONFIG_FLAGS			1
	#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM		(0x1 << 0)
	#define XE_QUERY_CONFIG_FLAGS_USE_GUC		(0x1 << 1)
#define XE_QUERY_CONFIG_MIN_ALIGNMENT		2
#define XE_QUERY_CONFIG_VA_BITS			3
#define XE_QUERY_CONFIG_GT_COUNT		4
#define XE_QUERY_CONFIG_MEM_REGION_COUNT	5
#define XE_QUERY_CONFIG_NUM_PARAM		(XE_QUERY_CONFIG_MEM_REGION_COUNT + 1)
	__u64 info[];
};

struct drm_xe_query_gts {
	__u32 num_gt;
	__u32 pad;

	/*
	 * TODO: Perhaps info about every mem region relative to this GT? e.g.
	 * bandwidth between this GT and remote region?
	 */

	struct drm_xe_query_gt {
#define XE_QUERY_GT_TYPE_MAIN		0
#define XE_QUERY_GT_TYPE_REMOTE		1
#define XE_QUERY_GT_TYPE_MEDIA		2
		__u16 type;
		__u16 instance;
		__u32 clock_freq;
		__u64 features;
		__u64 native_mem_regions;	/* bit mask of instances from drm_xe_query_mem_usage */
		__u64 slow_mem_regions;		/* bit mask of instances from drm_xe_query_mem_usage */
		__u64 inaccessible_mem_regions;	/* bit mask of instances from drm_xe_query_mem_usage */
		__u64 reserved[8];
	} gts[];
};

struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

	/** @type: type of mask */
	__u16 type;
#define XE_TOPO_DSS_GEOMETRY	(1 << 0)
#define XE_TOPO_DSS_COMPUTE	(1 << 1)
#define XE_TOPO_EU_PER_DSS	(1 << 2)

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};

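/*
 * Decoding example: a minimal sketch of testing whether unit "i" is present in
 * a returned mask (assuming "topo" points at a drm_xe_query_topology_mask
 * filled in by DRM_XE_DEVICE_QUERY_GT_TOPOLOGY; "topo" and "i" are placeholder
 * names, not part of this uAPI).
 *
 * .. code-block:: C
 *
 *	// Little-endian: byte 0 holds bits 0..7, byte 1 holds bits 8..15, ...
 *	int present = topo->mask[i / 8] & (1 << (i % 8));
 */
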
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @query: The type of data to query */
	__u32 query;

#define DRM_XE_DEVICE_QUERY_ENGINES	0
#define DRM_XE_DEVICE_QUERY_MEM_USAGE	1
#define DRM_XE_DEVICE_QUERY_CONFIG	2
#define DRM_XE_DEVICE_QUERY_GTS		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG	4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY	5

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

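/*
 * Usage example: a minimal sketch of the assumed two-pass query pattern (first
 * call with @size == 0 asks the kernel for the required size, second call
 * fills @data). "fd" is a placeholder for an open device file descriptor;
 * error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_CONFIG,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);	// query.size is now set
 *
 *	struct drm_xe_query_config *config = malloc(query.size);
 *
 *	query.data = (uintptr_t)config;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);	// data copied to config
 */
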
struct drm_xe_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags, currently a mask of memory instances where the BO can
	 * be placed
	 */
#define XE_GEM_CREATE_FLAG_DEFER_BACKING	(0x1 << 24)
#define XE_GEM_CREATE_FLAG_SCANOUT		(0x1 << 25)
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *
	 *  2. Never be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

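/*
 * Usage example: a minimal sketch of creating a 64 KiB BO. The placement mask
 * is assumed to be built from memory region instances reported by
 * DRM_XE_DEVICE_QUERY_MEM_USAGE ("fd" and "instance" are placeholders; error
 * handling omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_create create = {
 *		.size = 64 * 1024,
 *		.flags = 1 << instance,	// assumed placement bit for the region
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *	// create.handle now holds the (nonzero) object handle
 */
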
struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: The fake offset to use for the subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

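/*
 * Usage example: a minimal sketch of mapping a BO into the CPU address space
 * via the fake offset ("fd", "handle" and "size" are placeholders; error
 * handling omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_mmap_offset mmo = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, mmo.offset);
 */
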
/**
 * struct drm_xe_vm_bind_op_error_capture - format of VM bind op error capture
 */
struct drm_xe_vm_bind_op_error_capture {
	/** @error: errno that occurred */
	__s32 error;
	/** @op: operation that encountered the error */
	__u32 op;
	/** @addr: address of bind op */
	__u64 addr;
	/** @size: size of bind */
	__u64 size;
};

/** struct drm_xe_ext_vm_set_property - VM set property extension */
struct drm_xe_ext_vm_set_property {
	/** @base: base user extension */
	struct xe_user_extension base;

	/** @property: property to set */
#define XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS		0
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
#define XE_VM_EXTENSION_SET_PROPERTY	0
	__u64 extensions;

	/** @flags: Flags */
	__u32 flags;

#define DRM_XE_VM_CREATE_SCRATCH_PAGE	(0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE	(0x1 << 1)
#define DRM_XE_VM_CREATE_ASYNC_BIND_OPS	(0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE	(0x1 << 3)

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_bind_op {
	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;
		/** @userptr: user pointer to bind on */
		__u64 userptr;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

	/**
	 * @gt_mask: Mask for which GTs to create binds for, 0 == All GTs,
	 * only applies to creating new VMAs
	 */
	__u64 gt_mask;

	/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
	__u32 op;

	/** @region: Memory region to prefetch VMA to, instance not a mask */
	__u32 region;

#define XE_VM_BIND_OP_MAP		0x0
#define XE_VM_BIND_OP_UNMAP		0x1
#define XE_VM_BIND_OP_MAP_USERPTR	0x2
#define XE_VM_BIND_OP_RESTART		0x3
#define XE_VM_BIND_OP_UNMAP_ALL		0x4
#define XE_VM_BIND_OP_PREFETCH		0x5

#define XE_VM_BIND_FLAG_READONLY	(0x1 << 16)
	/*
	 * Bind op completions are always async, hence the support for out
	 * syncs. This flag indicates that the allocation of memory for new
	 * page tables and the job to program the page tables are asynchronous
	 * relative to the IOCTL. That part of a bind operation can fail under
	 * memory pressure; the job in practice can't fail unless the system is
	 * totally shot.
	 *
	 * If this flag is clear and the IOCTL doesn't return an error, in
	 * practice the bind op is good and will complete.
	 *
	 * If this flag is set and the IOCTL doesn't return an error, the bind
	 * op can still fail and recovery is needed. If configured, the bind op
	 * that caused the error will be captured in
	 * drm_xe_vm_bind_op_error_capture. Once the user sees the error (via a
	 * ufence + XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS), it should
	 * free memory via non-async unbinds and then restart all queued async
	 * bind ops via XE_VM_BIND_OP_RESTART, or alternatively destroy the VM.
	 *
	 * This flag is only allowed when DRM_XE_VM_CREATE_ASYNC_BIND_OPS is
	 * configured in the VM and must be set if the VM is configured with
	 * DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state.
	 */
#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 17)
	/*
	 * Valid on a faulting VM only, do the MAP operation immediately rather
	 * than deferring the MAP to the page fault handler.
	 */
#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 18)

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @engine_id: engine_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and the engine must have the same vm_id. If zero, the default VM
	 * bind engine is used.
	 */
	__u32 engine_id;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;
		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @num_syncs: number of syncs to wait on */
	__u32 num_syncs;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

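/*
 * Usage example: a minimal sketch of a single synchronous MAP of a whole BO
 * ("fd", "vm_id", "bo_handle" and "bo_size" are placeholders; a VM created
 * with DRM_XE_VM_CREATE_ASYNC_BIND_OPS would additionally need
 * XE_VM_BIND_FLAG_ASYNC in @op; error handling omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.range = bo_size,		// page aligned
 *			.addr = 0x1a0000,		// GPU virtual address
 *			.op = XE_VM_BIND_OP_MAP,
 *		},
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
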
/** struct drm_xe_ext_engine_set_property - engine set property extension */
struct drm_xe_ext_engine_set_property {
	/** @base: base user extension */
	struct xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @value: property value */
	__u64 value;
};

/**
 * struct drm_xe_engine_set_property - engine set property
 *
 * Same namespace for extensions as drm_xe_engine_create
 */
struct drm_xe_engine_set_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @engine_id: Engine ID */
	__u32 engine_id;

	/** @property: property to set */
#define XE_ENGINE_PROPERTY_PRIORITY			0
#define XE_ENGINE_PROPERTY_TIMESLICE			1
#define XE_ENGINE_PROPERTY_PREEMPTION_TIMEOUT		2
	/*
	 * Long running or ULLS engine mode. DMA fences are not allowed in this
	 * mode. Must match the value of DRM_XE_VM_CREATE_COMPUTE_MODE; serves
	 * as a sanity check that the UMD knows what it is doing. Can only be
	 * set at engine create time.
	 */
#define XE_ENGINE_PROPERTY_COMPUTE_MODE			3
#define XE_ENGINE_PROPERTY_PERSISTENCE			4
#define XE_ENGINE_PROPERTY_JOB_TIMEOUT			5
#define XE_ENGINE_PROPERTY_ACC_TRIGGER			6
#define XE_ENGINE_PROPERTY_ACC_NOTIFY			7
#define XE_ENGINE_PROPERTY_ACC_GRANULARITY		8
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_engine_create {
	/** @extensions: Pointer to the first extension struct, if any */
#define XE_ENGINE_EXTENSION_SET_PROPERTY	0
	__u64 extensions;

	/** @width: submission width (number of BBs per exec) for this engine */
	__u16 width;

	/** @num_placements: number of valid placements for this engine */
	__u16 num_placements;

	/** @vm_id: VM to use for this engine */
	__u32 vm_id;

	/** @flags: MBZ */
	__u32 flags;

	/** @engine_id: Returned engine ID */
	__u32 engine_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

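/*
 * Usage example: a minimal sketch of creating a width 1, single placement
 * engine on the render class ("fd" and "vm_id" are placeholders; error
 * handling omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_engine_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,	// 1 x 1 array
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create);
 *	// create.engine_id now holds the new engine ID
 */
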
struct drm_xe_engine_destroy {
	/** @engine_id: Engine ID */
	__u32 engine_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	__u32 flags;

#define DRM_XE_SYNC_SYNCOBJ		0x0
#define DRM_XE_SYNC_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_DMA_BUF		0x2
#define DRM_XE_SYNC_USER_FENCE		0x3
#define DRM_XE_SYNC_SIGNAL		0x10

	union {
		__u32 handle;
		/**
		 * @addr: Address of the user fence. When a sync is passed in
		 * via the exec IOCTL this is a GPU address in the VM. When a
		 * sync is passed in via the VM bind IOCTL this is a user
		 * pointer. In either case, it is the user's responsibility
		 * that this address is present and mapped when the user fence
		 * is signalled. Must be qword aligned.
		 */
		__u64 addr;
	};

	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @engine_id: Engine ID for the batch buffer */
	__u32 engine_id;

	/** @num_syncs: Amount of struct drm_xe_sync in array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of the batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffers in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

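/*
 * Usage example: a minimal sketch of submitting one batch buffer on a width 1
 * engine and signalling a syncobj on completion ("fd", "engine_id",
 * "syncobj_handle" and "batch_addr" are placeholders; error handling omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.engine_id = engine_id,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&sync,
 *		.address = batch_addr,		// bound in the engine's VM
 *		.num_batch_buffer = 1,		// must match the engine width
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */
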
struct drm_xe_mmio {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	__u32 addr;

	__u32 flags;

#define DRM_XE_MMIO_8BIT	0x0
#define DRM_XE_MMIO_16BIT	0x1
#define DRM_XE_MMIO_32BIT	0x2
#define DRM_XE_MMIO_64BIT	0x3
#define DRM_XE_MMIO_BITS_MASK	0x3
#define DRM_XE_MMIO_READ	0x4
#define DRM_XE_MMIO_WRITE	0x8

	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - wait user fence
 *
 * Wait on a user fence. XE will wake up on every HW engine interrupt in the
 * instances list and check if the user fence is complete:
 * (*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 */
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;
	union {
		/**
		 * @addr: user pointer address to wait on, must be qword
		 * aligned
		 */
		__u64 addr;
		/**
		 * @vm_id: The ID of the VM which encountered an error, used
		 * with DRM_XE_UFENCE_WAIT_VM_ERROR. Upper 32 bits must be
		 * clear.
		 */
		__u64 vm_id;
	};
	/** @op: wait operation (type of comparison) */
#define DRM_XE_UFENCE_WAIT_EQ	0
#define DRM_XE_UFENCE_WAIT_NEQ	1
#define DRM_XE_UFENCE_WAIT_GT	2
#define DRM_XE_UFENCE_WAIT_GTE	3
#define DRM_XE_UFENCE_WAIT_LT	4
#define DRM_XE_UFENCE_WAIT_LTE	5
	__u16 op;
	/** @flags: wait flags */
#define DRM_XE_UFENCE_WAIT_SOFT_OP	(1 << 0)	/* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME	(1 << 1)
#define DRM_XE_UFENCE_WAIT_VM_ERROR	(1 << 2)
	__u16 flags;
	/** @value: compare value */
	__u64 value;
	/** @mask: comparison mask */
#define DRM_XE_UFENCE_WAIT_U8		0xffu
#define DRM_XE_UFENCE_WAIT_U16		0xffffu
#define DRM_XE_UFENCE_WAIT_U32		0xffffffffu
#define DRM_XE_UFENCE_WAIT_U64		0xffffffffffffffffu
	__u64 mask;
	/** @timeout: how long to wait before bailing, value in jiffies */
	__s64 timeout;
	/**
	 * @num_engines: number of engine instances to wait on, must be zero
	 * when DRM_XE_UFENCE_WAIT_SOFT_OP is set
	 */
	__u64 num_engines;
	/**
	 * @instances: user pointer to an array of drm_xe_engine_class_instance
	 * to wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP is set
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

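/*
 * Usage example: a minimal sketch of waiting for a qword at "fence_addr" to
 * become 1, watching one engine instance ("fd", "fence_addr", "instance" and
 * "timeout_jiffies" are placeholders; error handling omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_wait_user_fence wait = {
 *		.addr = fence_addr,		// qword aligned
 *		.op = DRM_XE_UFENCE_WAIT_EQ,
 *		.value = 1,
 *		.mask = DRM_XE_UFENCE_WAIT_U64,
 *		.timeout = timeout_jiffies,
 *		.num_engines = 1,
 *		.instances = (uintptr_t)&instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */
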
struct drm_xe_vm_madvise {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM in which the VMA exists */
	__u32 vm_id;

	/** @range: Number of bytes in the VMA */
	__u64 range;

	/** @addr: Address of the VMA to operate on */
	__u64 addr;

	/*
	 * Setting the preferred location will trigger a migrate of the VMA
	 * backing store to the new location if the backing store is already
	 * allocated.
	 */
#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS	0
#define DRM_XE_VM_MADVISE_PREFERRED_GT		1
	/*
	 * In this case the lower 32 bits are the mem class and the upper 32
	 * are the GT. The combination sets both in a single IOCTL plus
	 * migrates the VMA to the preferred location.
	 */
#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS_GT	2
	/*
	 * The CPU will do atomic memory operations to this VMA. Must be set on
	 * some devices for atomics to behave correctly.
	 */
#define DRM_XE_VM_MADVISE_CPU_ATOMIC		3
	/*
	 * The device will do atomic memory operations to this VMA. Must be set
	 * on some devices for atomics to behave correctly.
	 */
#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC		4
	/*
	 * Priority WRT eviction (moving from the preferred memory location due
	 * to memory pressure). The lower the priority, the more likely to be
	 * evicted.
	 */
#define DRM_XE_VM_MADVISE_PRIORITY		5
#define		DRM_XE_VMA_PRIORITY_LOW		0
#define		DRM_XE_VMA_PRIORITY_NORMAL	1	/* Default */
#define		DRM_XE_VMA_PRIORITY_HIGH	2	/* Must be elevated user */
	/* Pin the VMA in memory, must be elevated user */
#define DRM_XE_VM_MADVISE_PIN			6

	/** @property: property to set */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

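/*
 * Usage example: a minimal sketch of setting the preferred memory class and GT
 * for a VMA in one call, packing the value as described above ("fd", "vm_id",
 * "vma_addr", "vma_size" and "gt_id" are placeholders; error handling
 * omitted).
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_madvise madvise = {
 *		.vm_id = vm_id,
 *		.addr = vma_addr,
 *		.range = vma_size,
 *		.property = DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS_GT,
 *		// lower 32 bits: mem class, upper 32 bits: GT
 *		.value = XE_MEM_REGION_CLASS_VRAM | ((__u64)gt_id << 32),
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_VM_MADVISE, &madvise);
 */
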
#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */