xref: /linux/drivers/gpu/drm/xe/xe_device_types.h (revision ce517adff59c50a54a33ae90e2947eff9698dc86)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2022-2023 Intel Corporation
4  */
5 
6 #ifndef _XE_DEVICE_TYPES_H_
7 #define _XE_DEVICE_TYPES_H_
8 
9 #include <linux/pci.h>
10 
11 #include <drm/drm_device.h>
12 #include <drm/drm_file.h>
13 #include <drm/ttm/ttm_device.h>
14 
15 #include "xe_devcoredump_types.h"
16 #include "xe_drm_ras_types.h"
17 #include "xe_heci_gsc.h"
18 #include "xe_late_bind_fw_types.h"
19 #include "xe_oa_types.h"
20 #include "xe_pagefault_types.h"
21 #include "xe_platform_types.h"
22 #include "xe_pmu_types.h"
23 #include "xe_pt_types.h"
24 #include "xe_sriov_pf_types.h"
25 #include "xe_sriov_types.h"
26 #include "xe_sriov_vf_types.h"
27 #include "xe_sriov_vf_ccs_types.h"
28 #include "xe_step_types.h"
29 #include "xe_survivability_mode_types.h"
30 #include "xe_tile_types.h"
31 #include "xe_validation.h"
32 
33 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
34 #define TEST_VM_OPS_ERROR
35 #endif
36 
37 struct drm_pagemap_shrinker;
38 struct intel_display;
39 struct intel_dg_nvm_dev;
40 struct xe_ggtt;
41 struct xe_i2c;
42 struct xe_pat_ops;
43 struct xe_pxp;
44 struct xe_vram_region;
45 
/**
 * enum xe_wedged_mode - possible wedged modes
 * @XE_WEDGED_MODE_NEVER: Device will never be declared wedged.
 * @XE_WEDGED_MODE_UPON_CRITICAL_ERROR: Device will be declared wedged only
 *	when critical error occurs like GT reset failure or firmware failure.
 *	This is the default mode.
 * @XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET: Device will be declared wedged on
 *	any hang. In this mode, engine resets are disabled to avoid automatic
 *	recovery attempts. This mode is primarily intended for debugging hangs.
 *
 * Values are spelled out explicitly: the mode is user-visible (set via kernel
 * parameter and debugfs, see &xe_device.wedged.mode), so keep them stable.
 */
enum xe_wedged_mode {
	XE_WEDGED_MODE_NEVER = 0,
	XE_WEDGED_MODE_UPON_CRITICAL_ERROR = 1,
	XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET = 2,
};
61 
/* Sentinel offset meaning a BO has no offset assigned — TODO confirm at users */
#define XE_BO_INVALID_OFFSET	LONG_MAX

/* IP versions are stored as major*100 + minor; /100 extracts the major part */
#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)

/* Flag for xe_device.info.vram_flags: VRAM requires 64K page granularity */
#define XE_VRAM_FLAGS_NEED64K		BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

/*
 * Highest GT/tile count for any platform.  Used only for memory allocation
 * sizing.  Any logic looping over GTs or mapping userspace GT IDs into GT
 * structures should use the per-platform xe->info.max_gt_per_tile instead.
 */
#define XE_MAX_GT_PER_TILE 2

/* Address space IDs are 20 bits wide (see xe_device.usm) */
#define XE_MAX_ASID	(BIT(20))
84 
/**
 * struct xe_device - Top level struct of Xe device
 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/** @display: display device data, must be placed after drm device member */
	struct intel_display *display;
#endif

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @info.platform_name: platform name */
		const char *platform_name;
		/** @info.graphics_name: graphics IP name */
		const char *graphics_name;
		/** @info.media_name: media IP name */
		const char *media_name;
		/** @info.graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @info.media_verx100: media IP version */
		u32 media_verx100;
		/** @info.mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @info.platform: Xe platform enum */
		enum xe_platform platform;
		/** @info.subplatform: Xe subplatform enum */
		enum xe_subplatform subplatform;
		/** @info.devid: device ID */
		u16 devid;
		/** @info.revid: device revision */
		u8 revid;
		/** @info.step: stepping information for each IP */
		struct xe_step_info step;
		/** @info.dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @info.vram_flags: Vram flags */
		u8 vram_flags;
		/** @info.tile_count: Number of tiles */
		u8 tile_count;
		/** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
		u8 max_gt_per_tile;
		/** @info.multi_lrc_mask: bitmask of engine classes which support multi-lrc */
		u8 multi_lrc_mask;
		/** @info.gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @info.vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @info.va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/*
		 * Keep all flags below alphabetically sorted
		 */

		/** @info.force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @info.has_access_counter: Device supports access counter */
		u8 has_access_counter:1;
		/** @info.has_asid: Has address space ID */
		u8 has_asid:1;
		/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
		u8 has_atomic_enable_pte_bit:1;
		/** @info.has_cached_pt: Supports caching pagetable */
		u8 has_cached_pt:1;
		/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
		u8 has_device_atomics_on_smem:1;
		/** @info.has_fan_control: Device supports fan control */
		u8 has_fan_control:1;
		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @info.has_gsc_nvm: Device has gsc non-volatile memory */
		u8 has_gsc_nvm:1;
		/** @info.has_heci_cscfi: device has heci cscfi */
		u8 has_heci_cscfi:1;
		/** @info.has_heci_gscfi: device has heci gscfi */
		u8 has_heci_gscfi:1;
		/** @info.has_i2c: Device has I2C controller */
		u8 has_i2c:1;
		/** @info.has_late_bind: Device has firmware late binding support */
		u8 has_late_bind:1;
		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/** @info.has_mbx_power_limits: Device has support to manage power limits using
		 * pcode mailbox commands.
		 */
		u8 has_mbx_power_limits:1;
		/** @info.has_mbx_thermal_info: Device supports thermal mailbox commands */
		u8 has_mbx_thermal_info:1;
		/** @info.has_mem_copy_instr: Device supports MEM_COPY instruction */
		u8 has_mem_copy_instr:1;
		/** @info.has_mert: Device has standalone MERT */
		u8 has_mert:1;
		/** @info.has_page_reclaim_hw_assist: Device supports page reclamation feature */
		u8 has_page_reclaim_hw_assist:1;
		/** @info.has_pre_prod_wa: Pre-production workarounds still present in driver */
		u8 has_pre_prod_wa:1;
		/** @info.has_pxp: Device has PXP support */
		u8 has_pxp:1;
		/** @info.has_ctx_tlb_inval: Has context based TLB invalidations */
		u8 has_ctx_tlb_inval:1;
		/** @info.has_range_tlb_inval: Has range based TLB invalidations */
		u8 has_range_tlb_inval:1;
		/** @info.has_soc_remapper_sysctrl: Has SoC remapper system controller */
		u8 has_soc_remapper_sysctrl:1;
		/** @info.has_soc_remapper_telem: Has SoC remapper telemetry support */
		u8 has_soc_remapper_telem:1;
		/** @info.has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @info.has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
		u8 has_64bit_timestamp:1;
		/** @info.is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @info.needs_scratch: needs scratch page for oob prefetch to work */
		u8 needs_scratch:1;
		/**
		 * @info.probe_display: Probe display hardware.  If set to
		 * false, the driver will behave as if there is no display
		 * hardware present and will not try to read/write to it in any
		 * way.  The display hardware, if it exists, will not be
		 * exposed to userspace and will be left untouched in whatever
		 * state the firmware or bootloader left it in.
		 */
		u8 probe_display:1;
		/** @info.skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;
		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @info.skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
		/** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */
		u8 needs_shared_vf_gt_wq:1;
	} info;

	/** @wa_active: keep track of active workarounds */
	struct {
		/** @wa_active.oob: bitmap with active OOB workarounds */
		unsigned long *oob;

		/**
		 * @wa_active.oob_initialized: Mark oob as initialized to help detect misuse
		 * of XE_DEVICE_WA() - it can only be called on initialization after
		 * Device OOB WAs have been processed.
		 */
		bool oob_initialized;
	} wa_active;

	/** @survivability: survivability information for device */
	struct xe_survivability survivability;

	/** @irq: device interrupt state */
	struct {
		/** @irq.lock: lock for processing irq's on this device */
		spinlock_t lock;

		/** @irq.enabled: interrupts enabled on this device */
		atomic_t enabled;

		/** @irq.msix: irq info for platforms that support MSI-X */
		struct {
			/** @irq.msix.nvec: number of MSI-X interrupts */
			u16 nvec;
			/** @irq.msix.indexes: used to allocate MSI-X indexes */
			struct xarray indexes;
		} msix;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @mmio.size: size of MMIO space for device */
		size_t size;
		/** @mmio.regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @mem.vram: VRAM info for device */
		struct xe_vram_region *vram;
		/** @mem.sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
		/** @mem.shrinker: system memory shrinker. */
		struct xe_shrinker *shrinker;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;

		union {
			/** @sriov.pf: PF specific data */
			struct xe_device_pf pf;
			/** @sriov.vf: VF specific data */
			struct xe_device_vf vf;
		};

		/** @sriov.wq: workqueue used by the virtualization workers */
		struct workqueue_struct *wq;
	} sriov;

	/** @usm: unified memory state */
	struct {
		/** @usm.asid_to_vm: map an ASID to its VM */
		struct xarray asid_to_vm;
		/** @usm.next_asid: next ASID, used for cyclic ASID allocation */
		u32 next_asid;
		/** @usm.lock: protects USM state */
		struct rw_semaphore lock;
		/** @usm.pf_wq: page fault work queue, unbound, high priority */
		struct workqueue_struct *pf_wq;
		/*
		 * We pick 4 here because, in the current implementation, it
		 * yields the best bandwidth utilization of the kernel paging
		 * engine.
		 */
#define XE_PAGEFAULT_QUEUE_COUNT	4
		/** @usm.pf_queue: Page fault queues */
		struct xe_pagefault_queue pf_queue[XE_PAGEFAULT_QUEUE_COUNT];
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
		/** @usm.dpagemap_shrinker: Shrinker for unused pagemaps */
		struct drm_pagemap_shrinker *dpagemap_shrinker;
#endif
	} usm;

	/** @pinned: pinned BO state */
	struct {
		/** @pinned.lock: protected pinned BO list state */
		spinlock_t lock;
		/** @pinned.early: early pinned lists */
		struct {
			/** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
			struct list_head kernel_bo_present;
			/** @pinned.early.evicted: pinned BO that have been evicted */
			struct list_head evicted;
		} early;
		/** @pinned.late: late pinned lists */
		struct {
			/** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
			struct list_head kernel_bo_present;
			/** @pinned.late.evicted: pinned BO that have been evicted */
			struct list_head evicted;
			/** @pinned.late.external: pinned external and dma-buf. */
			struct list_head external;
		} late;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @preempt_fence_wq: used to serialize preempt fences */
	struct workqueue_struct *preempt_fence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work */
	struct workqueue_struct *unordered_wq;

	/** @destroy_wq: used to serialize user destroy work, like queue */
	struct workqueue_struct *destroy_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory access in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/**
		 * @mem_access.vram_userfault: Encapsulate vram_userfault
		 * related stuff
		 */
		struct {
			/**
			 * @mem_access.vram_userfault.lock: Protects access to
			 * @mem_access.vram_userfault.list. Using mutex instead
			 * of spinlock as the lock is applied to an entire list
			 * operation which may sleep
			 */
			struct mutex lock;

			/**
			 * @mem_access.vram_userfault.list: Keep list of userfaulted
			 * vram bo, which require to release their mmap mappings
			 * at runtime suspend path
			 */
			struct list_head list;
		} vram_userfault;
	} mem_access;

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @pat.ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @pat.table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @pat.n_entries: Number of PAT entries */
		int n_entries;
		/** @pat.pat_ats: PAT entry for PCIe ATS responses */
		const struct xe_pat_table_entry *pat_ats;
		/** @pat.pat_primary_pta: primary GT PAT entry for page table accesses */
		const struct xe_pat_table_entry *pat_primary_pta;
		/** @pat.pat_media_pta: media GT PAT entry for page table accesses */
		const struct xe_pat_table_entry *pat_media_pta;
		/** @pat.idx: PAT entry index to use for each cache level */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @d3cold.capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/**
		 * @d3cold.vram_threshold:
		 *
		 * This represents the permissible threshold (in megabytes)
		 * for vram save/restore. d3cold will be disallowed,
		 * when vram usage is above or equals the threshold value
		 * to avoid the vram save/restore latency.
		 * Default threshold value is 300MB.
		 */
		u32 vram_threshold;
		/** @d3cold.lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;

	/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
	struct notifier_block pm_notifier;
	/** @pm_block: Completion to block validating tasks on suspend / hibernate prepare */
	struct completion pm_block;
	/** @rebind_resume_list: List of wq items to kick on resume. */
	struct list_head rebind_resume_list;
	/** @rebind_resume_lock: Lock to protect the rebind_resume_list */
	struct mutex rebind_resume_lock;

	/** @pmt: Support the PMT driver callback interface */
	struct {
		/** @pmt.lock: protect access for telemetry data */
		struct mutex lock;
	} pmt;

	/** @soc_remapper: SoC remapper object */
	struct {
		/** @soc_remapper.lock: Serialize access to SoC Remapper's index registers */
		spinlock_t lock;

		/** @soc_remapper.set_telem_region: Set telemetry index */
		void (*set_telem_region)(struct xe_device *xe, u32 index);

		/** @soc_remapper.set_sysctrl_region: Set system controller index */
		void (*set_sysctrl_region)(struct xe_device *xe, u32 index);
	} soc_remapper;

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @nvm: discrete graphics non-volatile memory */
	struct intel_dg_nvm_dev *nvm;

	/** @late_bind: xe mei late bind interface */
	struct xe_late_bind late_bind;

	/** @oa: oa observation subsystem */
	struct xe_oa oa;

	/** @pxp: Encapsulate Protected Xe Path support */
	struct xe_pxp *pxp;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/** @wedged: Struct to control Wedged States and mode */
	struct {
		/** @wedged.flag: Xe device faced a critical error and is now blocked. */
		atomic_t flag;
		/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
		enum xe_wedged_mode mode;
		/** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
		unsigned long method;
		/** @wedged.inconsistent_reset: Inconsistent reset policy state between GTs */
		bool inconsistent_reset;
	} wedged;

	/** @bo_device: Struct to control async free of BOs */
	struct xe_bo_dev {
		/** @bo_device.async_free: Free worker */
		struct work_struct async_free;
		/** @bo_device.async_list: List of BOs to be freed */
		struct llist_head async_list;
	} bo_device;

	/** @pmu: performance monitoring unit */
	struct xe_pmu pmu;

	/** @ras: RAS structure for device */
	struct xe_drm_ras ras;

	/** @i2c: I2C host controller */
	struct xe_i2c *i2c;

	/** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */
	u32 atomic_svm_timeslice_ms;

	/** @min_run_period_lr_ms: LR VM (preempt fence mode) timeslice */
	u32 min_run_period_lr_ms;

	/** @min_run_period_pf_ms: LR VM (page fault mode) timeslice */
	u32 min_run_period_pf_ms;

#ifdef TEST_VM_OPS_ERROR
	/**
	 * @vm_inject_error_position: inject errors at different places in VM
	 * bind IOCTL based on this value
	 */
	u8 vm_inject_error_position;
#endif

#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	/**
	 * @global_total_pages: global GPU page usage tracked for gpu_mem
	 * tracepoints
	 */
	atomic64_t global_total_pages;
#endif
	/** @val: The domain for exhaustive eviction, which is currently per device. */
	struct xe_validation_device val;

	/** @psmi: GPU debugging via additional validation HW */
	struct {
		/** @psmi.capture_obj: PSMI buffer for VRAM */
		struct xe_bo *capture_obj[XE_MAX_TILES_PER_DEVICE + 1];
		/** @psmi.region_mask: Mask of valid memory regions */
		u8 region_mask;
	} psmi;

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	/** @g2g_test_array: for testing G2G communications */
	u32 *g2g_test_array;
	/** @g2g_test_count: for testing G2G communications */
	atomic_t g2g_test_count;
#endif

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs
	 */

	struct intel_uncore {
		spinlock_t lock;
	} uncore;
#endif
};
568 
/**
 * struct xe_file - file handle for Xe driver
 */
struct xe_file {
	/** @xe: xe device */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @vm.xa: xarray to store VMs */
		struct xarray xa;
		/**
		 * @vm.lock: Protects VM lookup + reference and removal from
		 * file xarray. Not intended to be an outer lock which does
		 * things while being held.
		 */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @exec_queue.xa: xarray to store exec queues */
		struct xarray xa;
		/**
		 * @exec_queue.lock: Protects exec queue lookup + reference and
		 * removal from file xarray. Not intended to be an outer lock
		 * which does things while being held.
		 */
		struct mutex lock;
		/**
		 * @exec_queue.pending_removal: items pending to be removed to
		 * synchronize GPU state update with ongoing query.
		 */
		atomic_t pending_removal;
	} exec_queue;

	/** @run_ticks: hw engine class run time in ticks for this drm client */
	u64 run_ticks[XE_ENGINE_CLASS_MAX];

	/** @client: drm client */
	struct xe_drm_client *client;

	/**
	 * @process_name: process name for file handle, used to safely output
	 * during error situations where xe file can outlive process
	 */
	char *process_name;

	/**
	 * @pid: pid for file handle, used to safely output during error
	 * situations where xe file can outlive process
	 */
	pid_t pid;

	/** @refcount: ref count of this xe file */
	struct kref refcount;
};
629 
630 #endif
631