/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */

#ifndef _XE_DEVICE_TYPES_H_
#define _XE_DEVICE_TYPES_H_

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>

#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_ttm_vram_mgr_types.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#endif

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif

struct xe_ggtt;
struct xe_pat_ops;
struct xe_pxp;

#define XE_BO_INVALID_OFFSET	LONG_MAX

#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)

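/*
 * Usage sketch (illustrative only, not part of this header): the VER()
 * macros compare major IP versions, while the VERx100 variants keep the
 * minor part, so graphics_verx100 == 1271 means graphics version 12.71
 * and GRAPHICS_VER() == 12. A hypothetical gating check could look like:
 *
 *	if (GRAPHICS_VER(xe) >= 20 || GRAPHICS_VERx100(xe) == 1271)
 *		enable_feature(xe);	// hypothetical helper
 */
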
#define XE_VRAM_FLAGS_NEED64K		BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

#define XE_MAX_ASID	(BIT(20))

#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&			\
	 (_xe)->info.step.graphics >= (min_step) &&		\
	 (_xe)->info.step.graphics < (max_step))
#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&				\
	 (_xe)->info.subplatform == (sub) &&				\
	 (_xe)->info.step.graphics >= (min_step) &&			\
	 (_xe)->info.step.graphics < (max_step))

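/*
 * Usage sketch (illustrative only): stepping checks take a half-open
 * [min_step, max_step) range. Matching, say, DG2/G10 parts at graphics
 * steppings A0 up to (but excluding) B0 might look like the following,
 * assuming the usual xe_platform/xe_step enum values; the helper is
 * hypothetical:
 *
 *	if (IS_SUBPLATFORM_STEP(xe, XE_DG2, XE_SUBPLATFORM_DG2_G10,
 *				STEP_A0, STEP_B0))
 *		apply_workaround(xe);	// hypothetical helper
 */
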
#define tile_to_xe(tile__)								\
	_Generic(tile__,								\
		 const struct xe_tile * : (const struct xe_device *)((tile__)->xe),	\
		 struct xe_tile * : (tile__)->xe)

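/*
 * The _Generic selection above preserves constness: a const struct
 * xe_tile * yields a const struct xe_device *, while a mutable tile
 * pointer yields a mutable device pointer. Illustrative sketch only:
 *
 *	void peek(const struct xe_tile *tile)
 *	{
 *		// const in, const out; no cast needed at the call site.
 *		const struct xe_device *xe = tile_to_xe(tile);
 *	}
 */
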
/**
 * struct xe_vram_region - memory region structure
 *
 * This is used to describe a memory region in an xe device,
 * such as HBM memory or CXL extension memory.
 */
struct xe_vram_region {
	/** @io_start: IO start address of this VRAM instance */
	resource_size_t io_start;
	/**
	 * @io_size: IO size of this VRAM instance
	 *
	 * This represents how much of this VRAM we can access
	 * via the CPU through the VRAM BAR. This can be smaller
	 * than @usable_size, in which case only part of VRAM is CPU
	 * accessible (typically the first 256M). This
	 * configuration is known as small-bar.
	 */
	resource_size_t io_size;
	/** @dpa_base: This memory region's DPA (device physical address) base */
	resource_size_t dpa_base;
	/**
	 * @usable_size: usable size of VRAM
	 *
	 * Usable size of VRAM excluding reserved portions
	 * (e.g. stolen memory)
	 */
	resource_size_t usable_size;
	/**
	 * @actual_physical_size: Actual VRAM size
	 *
	 * Actual VRAM size including reserved portions
	 * (e.g. stolen memory)
	 */
	resource_size_t actual_physical_size;
	/** @mapping: pointer to VRAM mappable space */
	void __iomem *mapping;
	/** @ttm: VRAM TTM manager */
	struct xe_ttm_vram_mgr ttm;
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
	/** @pagemap: Used to remap device memory as ZONE_DEVICE */
	struct dev_pagemap pagemap;
	/**
	 * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
	 * pages of this tile.
	 */
	struct drm_pagemap dpagemap;
	/**
	 * @hpa_base: base host physical address
	 *
	 * This is generated when the device memory is remapped as
	 * ZONE_DEVICE.
	 */
	resource_size_t hpa_base;
#endif
};

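/*
 * Per the @io_size documentation above, a small-bar configuration can be
 * recognized by comparing the CPU-visible window against the usable VRAM
 * size. A minimal sketch, assuming a struct xe_vram_region obtained from
 * a tile; the helper name is hypothetical:
 *
 *	static inline bool vram_is_small_bar(const struct xe_vram_region *vram)
 *	{
 *		// Only part of VRAM is CPU accessible through the BAR.
 *		return vram->io_size < vram->usable_size;
 *	}
 */
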
/**
 * struct xe_mmio - register mmio structure
 *
 * Represents an MMIO region that the CPU may use to access registers.  A
 * region may share its IO map with other regions (e.g., all GTs within a
 * tile share the same map with their parent tile, but represent different
 * subregions of the overall IO space).
 */
struct xe_mmio {
	/** @tile: Backpointer to tile, used for tracing */
	struct xe_tile *tile;

	/** @regs: Map used to access registers. */
	void __iomem *regs;

	/**
	 * @sriov_vf_gt: Backpointer to GT.
	 *
	 * This pointer is only set for GT MMIO regions and only when running
	 * as an SR-IOV VF device.
	 */
	struct xe_gt *sriov_vf_gt;

	/**
	 * @regs_size: Length of the register region within the map.
	 *
	 * The size of the iomap set in *regs is generally larger than the
	 * register mmio space since it includes unused regions and/or
	 * non-register regions such as the GGTT PTEs.
	 */
	size_t regs_size;

	/** @adj_limit: adjust MMIO address if address is below this value */
	u32 adj_limit;

	/** @adj_offset: offset to add to MMIO address when adjusting */
	u32 adj_offset;
};

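/*
 * @adj_limit/@adj_offset implement a simple address fixup: register
 * addresses below the limit are relocated by the offset (used e.g. for
 * the media GT's GT-relative register range within the tile's map). A
 * minimal sketch of what the mmio accessors are expected to do with
 * these fields:
 *
 *	u32 addr = reg.addr;
 *
 *	if (addr < mmio->adj_limit)
 *		addr += mmio->adj_offset;
 *	val = readl(mmio->regs + addr);
 */
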
/**
 * struct xe_tile - hardware tile structure
 *
 * From a driver perspective, a "tile" is effectively a complete GPU, containing
 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
 *
 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
 * device and designate one "root" tile as being responsible for external PCI
 * communication.  PCI BAR0 exposes the GGTT and MMIO register space for each
 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
 * with each tile similarly.  Device-wide interrupts can be enabled/disabled
 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
 * have interrupts that need servicing.
 */
struct xe_tile {
	/** @xe: Backpointer to tile's PCI device */
	struct xe_device *xe;

	/** @id: ID of the tile */
	u8 id;

	/**
	 * @primary_gt: Primary GT
	 */
	struct xe_gt *primary_gt;

	/**
	 * @media_gt: Media GT
	 *
	 * Only present on devices with media version >= 13.
	 */
	struct xe_gt *media_gt;

	/**
	 * @mmio: MMIO info for a tile.
	 *
	 * Each tile has its own 16MB space in BAR0, laid out as:
	 * * 0-4MB: registers
	 * * 4MB-8MB: reserved
	 * * 8MB-16MB: global GTT
	 */
	struct xe_mmio mmio;

	/** @mem: memory management info for tile */
	struct {
		/**
		 * @mem.vram: VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_vram_region vram;

		/** @mem.ggtt: Global graphics translation table */
		struct xe_ggtt *ggtt;

		/**
		 * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
		 *
		 * Media GT shares a pool with its primary GT.
		 */
		struct xe_sa_manager *kernel_bb_pool;
	} mem;

	/** @sriov: tile level virtualization data */
	union {
		struct {
			/** @sriov.pf.lmtt: Local Memory Translation Table. */
			struct xe_lmtt lmtt;
		} pf;
		struct {
			/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
			struct xe_ggtt_node *ggtt_balloon[2];
		} vf;
	} sriov;

	/** @memirq: Memory Based Interrupts. */
	struct xe_memirq memirq;

	/** @pcode: tile's PCODE */
	struct {
		/** @pcode.lock: protecting tile's PCODE mailbox data */
		struct mutex lock;
	} pcode;

	/** @migrate: Migration helper for vram blits and clearing */
	struct xe_migrate *migrate;

	/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
	struct kobject *sysfs;
};

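/*
 * Given the per-tile BAR0 layout documented in @mmio above (16MB per
 * tile, registers at the start, global GTT in the upper half), the
 * offsets for a given tile follow directly. A sketch, assuming the SZ_*
 * macros from <linux/sizes.h>:
 *
 *	resource_size_t tile_mmio_base = (resource_size_t)tile->id * SZ_16M;
 *	resource_size_t tile_ggtt_base = tile_mmio_base + SZ_8M;
 */
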
/**
 * struct xe_device - Top level struct of XE device
 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @info.platform_name: platform name */
		const char *platform_name;
		/** @info.graphics_name: graphics IP name */
		const char *graphics_name;
		/** @info.media_name: media IP name */
		const char *media_name;
		/** @info.graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @info.media_verx100: media IP version */
		u32 media_verx100;
		/** @info.mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @info.platform: XE platform enum */
		enum xe_platform platform;
		/** @info.subplatform: XE subplatform enum */
		enum xe_subplatform subplatform;
		/** @info.devid: device ID */
		u16 devid;
		/** @info.revid: device revision */
		u8 revid;
		/** @info.step: stepping information for each IP */
		struct xe_step_info step;
		/** @info.dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @info.vram_flags: VRAM flags */
		u8 vram_flags;
		/** @info.tile_count: Number of tiles */
		u8 tile_count;
		/** @info.gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @info.vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @info.va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/*
		 * Keep all flags below alphabetically sorted
		 */

		/** @info.force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @info.has_asid: Has address space ID */
		u8 has_asid:1;
		/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
		u8 has_atomic_enable_pte_bit:1;
		/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
		u8 has_device_atomics_on_smem:1;
		/** @info.has_fan_control: Device supports fan control */
		u8 has_fan_control:1;
		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @info.has_heci_cscfi: device has HECI CSCFI */
		u8 has_heci_cscfi:1;
		/** @info.has_heci_gscfi: device has HECI GSCFI */
		u8 has_heci_gscfi:1;
		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/**
		 * @info.has_mbx_power_limits: Device has support to manage
		 * power limits using pcode mailbox commands.
		 */
		u8 has_mbx_power_limits:1;
		/** @info.has_pxp: Device has PXP support */
		u8 has_pxp:1;
		/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
		u8 has_range_tlb_invalidation:1;
		/** @info.has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @info.has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
		u8 has_64bit_timestamp:1;
		/** @info.is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @info.needs_scratch: needs scratch page for oob prefetch to work */
		u8 needs_scratch:1;
		/**
		 * @info.probe_display: Probe display hardware.  If set to
		 * false, the driver will behave as if there is no display
		 * hardware present and will not try to read/write to it in any
		 * way.  The display hardware, if it exists, will not be
		 * exposed to userspace and will be left untouched in whatever
		 * state the firmware or bootloader left it in.
		 */
		u8 probe_display:1;
		/** @info.skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;
		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @info.skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
	} info;

	/** @survivability: survivability information for device */
	struct xe_survivability survivability;

	/** @irq: device interrupt state */
	struct {
		/** @irq.lock: lock for processing IRQs on this device */
		spinlock_t lock;

		/** @irq.enabled: interrupts enabled on this device */
		atomic_t enabled;

		/** @irq.msix: irq info for platforms that support MSI-X */
		struct {
			/** @irq.msix.nvec: number of MSI-X interrupts */
			u16 nvec;
			/** @irq.msix.indexes: used to allocate MSI-X indexes */
			struct xarray indexes;
		} msix;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @mmio.size: size of MMIO space for device */
		size_t size;
		/** @mmio.regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @mem.vram: VRAM info for device */
		struct xe_vram_region vram;
		/** @mem.sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
		/** @mem.shrinker: system memory shrinker. */
		struct xe_shrinker *shrinker;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;

		/** @sriov.pf: PF specific data */
		struct xe_device_pf pf;
		/** @sriov.vf: VF specific data */
		struct xe_device_vf vf;

		/** @sriov.wq: workqueue used by the virtualization workers */
		struct workqueue_struct *wq;
	} sriov;

	/** @usm: unified memory state */
	struct {
		/** @usm.asid_to_vm: map an ASID to its VM */
		struct xarray asid_to_vm;
		/** @usm.next_asid: next ASID, used to cyclically allocate ASIDs */
		u32 next_asid;
		/** @usm.lock: protects USM state */
		struct rw_semaphore lock;
	} usm;

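	/*
	 * ASIDs are expected to be handed out cyclically from
	 * @usm.asid_to_vm, bounded by XE_MAX_ASID. A hedged sketch of such
	 * an allocation (the actual allocation site lives elsewhere in the
	 * driver):
	 *
	 *	u32 asid;
	 *	int err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
	 *				  XA_LIMIT(1, XE_MAX_ASID - 1),
	 *				  &xe->usm.next_asid, GFP_KERNEL);
	 */
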
	/** @pinned: pinned BO state */
	struct {
		/** @pinned.lock: protects pinned BO list state */
		spinlock_t lock;
		/** @pinned.early: early pinned lists */
		struct {
			/** @pinned.early.kernel_bo_present: pinned kernel BOs that are present */
			struct list_head kernel_bo_present;
			/** @pinned.early.evicted: pinned BOs that have been evicted */
			struct list_head evicted;
		} early;
		/** @pinned.late: late pinned lists */
		struct {
			/** @pinned.late.kernel_bo_present: pinned kernel BOs that are present */
			struct list_head kernel_bo_present;
			/** @pinned.late.evicted: pinned BOs that have been evicted */
			struct list_head evicted;
			/** @pinned.late.external: pinned external and dma-buf BOs. */
			struct list_head external;
		} late;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @preempt_fence_wq: used to serialize preempt fences */
	struct workqueue_struct *preempt_fence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work, mostly display */
	struct workqueue_struct *unordered_wq;

	/** @destroy_wq: used to serialize user destroy work, like exec queue destruction */
	struct workqueue_struct *destroy_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory access in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/**
		 * @mem_access.vram_userfault: Encapsulate vram_userfault
		 * related stuff
		 */
		struct {
			/**
			 * @mem_access.vram_userfault.lock: Protects access to
			 * @mem_access.vram_userfault.list. Using a mutex
			 * instead of a spinlock as the lock is held across an
			 * entire list operation, which may sleep.
			 */
			struct mutex lock;

			/**
			 * @mem_access.vram_userfault.list: List of userfaulted
			 * VRAM BOs, whose mmap mappings need to be released on
			 * the runtime suspend path.
			 */
			struct list_head list;
		} vram_userfault;
	} mem_access;

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @pat.ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @pat.table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @pat.n_entries: Number of PAT entries */
		int n_entries;
		/** @pat.idx: PAT table index for each cache level */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @d3cold.capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/**
		 * @d3cold.vram_threshold:
		 *
		 * This represents the permissible threshold (in megabytes)
		 * for VRAM save/restore. d3cold will be disallowed when VRAM
		 * usage is at or above this threshold, to avoid the VRAM
		 * save/restore latency. The default threshold value is 300 MB.
		 */
		u32 vram_threshold;
		/** @d3cold.lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;

	/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
	struct notifier_block pm_notifier;

	/** @pmt: Support the PMT driver callback interface */
	struct {
		/** @pmt.lock: protect access for telemetry data */
		struct mutex lock;
	} pmt;

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @oa: oa observation subsystem */
	struct xe_oa oa;

	/** @pxp: Encapsulate Protected Xe Path support */
	struct xe_pxp *pxp;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/** @wedged: Struct to control Wedged States and mode */
	struct {
		/** @wedged.flag: Xe device faced a critical error and is now blocked. */
		atomic_t flag;
		/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
		int mode;
	} wedged;

	/** @bo_device: Struct to control async free of BOs */
	struct xe_bo_dev {
		/** @bo_device.async_free: Free worker */
		struct work_struct async_free;
		/** @bo_device.async_list: List of BOs to be freed */
		struct llist_head async_list;
	} bo_device;

	/** @pmu: performance monitoring unit */
	struct xe_pmu pmu;

#ifdef TEST_VM_OPS_ERROR
	/**
	 * @vm_inject_error_position: inject errors at different places in VM
	 * bind IOCTL based on this value
	 */
	u8 vm_inject_error_position;
#endif

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs
	 */
	struct intel_display display;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
			INTEL_DRAM_GDDR,
			INTEL_DRAM_GDDR_ECC,
			__INTEL_DRAM_TYPE_MAX,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/* To shut up runtime pm macros.. */
	struct xe_runtime_pm {} runtime_pm;

	/* only to allow build, not used functionally */
	u32 irq_mask;

	struct intel_uncore {
		spinlock_t lock;
	} uncore;

	/* only to allow build, not used functionally */
	struct {
		unsigned int hpll_freq;
		unsigned int czclk_freq;
		unsigned int fsb_freq, mem_freq, is_ddr3;
	};
#endif
};

/**
 * struct xe_file - file handle for XE driver
 */
struct xe_file {
	/** @xe: xe device */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @vm.xa: xarray to store VMs */
		struct xarray xa;
		/**
		 * @vm.lock: Protects VM lookup + reference and removal from
		 * file xarray. Not intended to be an outer lock which does
		 * things while being held.
		 */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @exec_queue.xa: xarray to store exec queues */
		struct xarray xa;
		/**
		 * @exec_queue.lock: Protects exec queue lookup + reference and
		 * removal from file xarray. Not intended to be an outer lock
		 * which does things while being held.
		 */
		struct mutex lock;
		/**
		 * @exec_queue.pending_removal: items pending removal, used to
		 * synchronize GPU state updates with ongoing queries.
		 */
		atomic_t pending_removal;
	} exec_queue;

	/** @run_ticks: hw engine class run time in ticks for this drm client */
	u64 run_ticks[XE_ENGINE_CLASS_MAX];

	/** @client: drm client */
	struct xe_drm_client *client;

	/**
	 * @process_name: process name for file handle, used to safely output
	 * during error situations where the xe file can outlive the process
	 */
	char *process_name;

	/**
	 * @pid: pid for file handle, used to safely output during error
	 * situations where the xe file can outlive the process
	 */
	pid_t pid;

	/** @refcount: ref count of this xe file */
	struct kref refcount;
};

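/*
 * Lookups under @vm.lock or @exec_queue.lock are expected to follow the
 * usual "lock, look up, take a reference, unlock" pattern, since these
 * locks only guard the xarrays themselves. A hedged sketch for VMs,
 * assuming xe_vm_get() takes a VM reference:
 *
 *	mutex_lock(&xef->vm.lock);
 *	vm = xa_load(&xef->vm.xa, id);
 *	if (vm)
 *		xe_vm_get(vm);
 *	mutex_unlock(&xef->vm.lock);
 */
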
#endif