/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */

#ifndef _XE_DEVICE_TYPES_H_
#define _XE_DEVICE_TYPES_H_

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_device.h>

#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
#include "xe_late_bind_fw_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
#include "xe_sriov_vf_types.h"
#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_validation.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#endif

struct dram_info;
struct intel_display;
struct intel_dg_nvm_dev;
struct xe_ggtt;
struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
struct xe_vram_region;

#define XE_BO_INVALID_OFFSET	LONG_MAX

#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
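
/*
 * Illustrative usage sketch, not part of the original header: IP versions
 * are stored as "verx100" values (e.g. 1270 for version 12.70), so code can
 * gate on either the major version or the full value. The helper below and
 * the version cutoff are hypothetical.
 *
 *	if (GRAPHICS_VERx100(xe) >= 1270 && !IS_DGFX(xe))
 *		enable_new_feature(xe);	// hypothetical helper
 */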

#define XE_VRAM_FLAGS_NEED64K		BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

#define XE_MAX_ASID	(BIT(20))

#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&			\
	 (_xe)->info.step.graphics >= (min_step) &&		\
	 (_xe)->info.step.graphics < (max_step))
#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&				\
	 (_xe)->info.subplatform == (sub) &&				\
	 (_xe)->info.step.graphics >= (min_step) &&			\
	 (_xe)->info.step.graphics < (max_step))
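
/*
 * Illustrative sketch, not part of the original header: the step range in
 * the macros above is half-open, so the hypothetical check below matches
 * graphics steppings A0 up to, but not including, B0.
 *
 *	if (IS_PLATFORM_STEP(xe, XE_DG2, STEP_A0, STEP_B0))
 *		apply_early_stepping_workaround(xe);	// hypothetical helper
 */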

#define tile_to_xe(tile__)								\
	_Generic(tile__,								\
		 const struct xe_tile * : (const struct xe_device *)((tile__)->xe),	\
		 struct xe_tile * : (tile__)->xe)
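
/*
 * Illustrative note, an assumption drawn from the _Generic selection above:
 * constness is preserved, so a const tile pointer yields a const device
 * pointer. The lookup helper below is hypothetical.
 *
 *	const struct xe_tile *tile = lookup_tile(xe, 0);	// hypothetical
 *	const struct xe_device *dev = tile_to_xe(tile);		// stays const
 */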

/**
 * struct xe_mmio - register mmio structure
 *
 * Represents an MMIO region that the CPU may use to access registers.  A
 * region may share its IO map with other regions (e.g., all GTs within a
 * tile share the same map with their parent tile, but represent different
 * subregions of the overall IO space).
 */
struct xe_mmio {
	/** @tile: Backpointer to tile, used for tracing */
	struct xe_tile *tile;

	/** @regs: Map used to access registers. */
	void __iomem *regs;

	/**
	 * @sriov_vf_gt: Backpointer to GT.
	 *
	 * This pointer is only set for GT MMIO regions and only when running
	 * as an SR-IOV VF.
	 */
	struct xe_gt *sriov_vf_gt;

	/**
	 * @regs_size: Length of the register region within the map.
	 *
	 * The size of the iomap set in *regs is generally larger than the
	 * register mmio space since it includes unused regions and/or
	 * non-register regions such as the GGTT PTEs.
	 */
	size_t regs_size;

	/** @adj_limit: adjust MMIO address if address is below this value */
	u32 adj_limit;

	/** @adj_offset: offset to add to MMIO address when adjusting */
	u32 adj_offset;
};
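
/*
 * Illustrative sketch, an assumption about how @adj_limit and @adj_offset
 * are consumed rather than a quote of the driver's accessors: offsets below
 * the limit are remapped before the access.
 *
 *	u32 addr = reg_offset;
 *	if (addr < mmio->adj_limit)
 *		addr += mmio->adj_offset;
 *	val = readl(mmio->regs + addr);
 */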

/**
 * struct xe_tile - hardware tile structure
 *
 * From a driver perspective, a "tile" is effectively a complete GPU, containing
 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
 *
 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
 * device and designate one "root" tile as being responsible for external PCI
 * communication.  PCI BAR0 exposes the GGTT and MMIO register space for each
 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
 * with each tile similarly.  Device-wide interrupts can be enabled/disabled
 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
 * have interrupts that need servicing.
 */
struct xe_tile {
	/** @xe: Backpointer to tile's PCI device */
	struct xe_device *xe;

	/** @id: ID of the tile */
	u8 id;

	/**
	 * @primary_gt: Primary GT
	 */
	struct xe_gt *primary_gt;

	/**
	 * @media_gt: Media GT
	 *
	 * Only present on devices with media version >= 13.
	 */
	struct xe_gt *media_gt;

	/**
	 * @mmio: MMIO info for a tile.
	 *
	 * Each tile has its own 16MB space in BAR0, laid out as:
	 * * 0-4MB: registers
	 * * 4MB-8MB: reserved
	 * * 8MB-16MB: global GTT
	 */
	struct xe_mmio mmio;

	/** @mem: memory management info for tile */
	struct {
		/**
		 * @mem.vram: VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_vram_region *vram;

		/** @mem.ggtt: Global graphics translation table */
		struct xe_ggtt *ggtt;

		/**
		 * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
		 *
		 * Media GT shares a pool with its primary GT.
		 */
		struct xe_sa_manager *kernel_bb_pool;
	} mem;

	/** @sriov: tile level virtualization data */
	union {
		struct {
			/** @sriov.pf.lmtt: Local Memory Translation Table. */
			struct xe_lmtt lmtt;
		} pf;
		struct {
			/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
			struct xe_ggtt_node *ggtt_balloon[2];
		} vf;
	} sriov;

	/** @memirq: Memory Based Interrupts. */
	struct xe_memirq memirq;

	/** @csc_hw_error_work: worker to report CSC HW errors */
	struct work_struct csc_hw_error_work;

	/** @pcode: tile's PCODE */
	struct {
		/** @pcode.lock: protecting tile's PCODE mailbox data */
		struct mutex lock;
	} pcode;

	/** @migrate: Migration helper for vram blits and clearing */
	struct xe_migrate *migrate;

	/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
	struct kobject *sysfs;

	/** @debugfs: debugfs directory associated with this tile */
	struct dentry *debugfs;
};
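
/*
 * Illustrative sketch, an assumption based on the BAR0 layout documented in
 * &struct xe_tile (16MB per tile, registers in the first 4MB), not the
 * driver's actual init code:
 *
 *	tile->mmio.regs = xe->mmio.regs + (size_t)tile->id * SZ_16M;
 *	tile->mmio.regs_size = SZ_4M;	// register subregion only
 */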

/**
 * struct xe_device - Top level struct of XE device
 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @info.platform_name: platform name */
		const char *platform_name;
		/** @info.graphics_name: graphics IP name */
		const char *graphics_name;
		/** @info.media_name: media IP name */
		const char *media_name;
		/** @info.graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @info.media_verx100: media IP version */
		u32 media_verx100;
		/** @info.mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @info.platform: XE platform enum */
		enum xe_platform platform;
		/** @info.subplatform: XE subplatform enum */
		enum xe_subplatform subplatform;
		/** @info.devid: device ID */
		u16 devid;
		/** @info.revid: device revision */
		u8 revid;
		/** @info.step: stepping information for each IP */
		struct xe_step_info step;
		/** @info.dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @info.vram_flags: Vram flags */
		u8 vram_flags;
		/** @info.tile_count: Number of tiles */
		u8 tile_count;
		/** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
		u8 max_gt_per_tile;
		/** @info.gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @info.vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @info.va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/*
		 * Keep all flags below alphabetically sorted
		 */

		/** @info.force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @info.has_asid: Has address space ID */
		u8 has_asid:1;
		/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
		u8 has_atomic_enable_pte_bit:1;
		/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
		u8 has_device_atomics_on_smem:1;
		/** @info.has_fan_control: Device supports fan control */
		u8 has_fan_control:1;
		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @info.has_gsc_nvm: Device has gsc non-volatile memory */
		u8 has_gsc_nvm:1;
		/** @info.has_heci_cscfi: device has heci cscfi */
		u8 has_heci_cscfi:1;
		/** @info.has_heci_gscfi: device has heci gscfi */
		u8 has_heci_gscfi:1;
		/** @info.has_late_bind: Device has firmware late binding support */
		u8 has_late_bind:1;
		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/** @info.has_mbx_power_limits: Device has support to manage power limits using
		 * pcode mailbox commands.
		 */
		u8 has_mbx_power_limits:1;
		/** @info.has_pxp: Device has PXP support */
		u8 has_pxp:1;
		/** @info.has_range_tlb_inval: Has range based TLB invalidations */
		u8 has_range_tlb_inval:1;
		/** @info.has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @info.has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
		u8 has_64bit_timestamp:1;
		/** @info.is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @info.needs_scratch: needs scratch page for oob prefetch to work */
		u8 needs_scratch:1;
		/**
		 * @info.probe_display: Probe display hardware.  If set to
		 * false, the driver will behave as if there is no display
		 * hardware present and will not try to read/write to it in any
		 * way.  The display hardware, if it exists, will not be
		 * exposed to userspace and will be left untouched in whatever
		 * state the firmware or bootloader left it in.
		 */
		u8 probe_display:1;
		/** @info.skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;
		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @info.skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
	} info;

	/** @wa_active: keep track of active workarounds */
	struct {
		/** @wa_active.oob: bitmap with active OOB workarounds */
		unsigned long *oob;

		/**
		 * @wa_active.oob_initialized: Mark oob as initialized to help detect misuse
		 * of XE_DEVICE_WA() - it can only be called on initialization after
		 * Device OOB WAs have been processed.
		 */
		bool oob_initialized;
	} wa_active;

	/** @survivability: survivability information for device */
	struct xe_survivability survivability;

	/** @irq: device interrupt state */
	struct {
		/** @irq.lock: lock for processing irq's on this device */
		spinlock_t lock;

		/** @irq.enabled: interrupts enabled on this device */
		atomic_t enabled;

		/** @irq.msix: irq info for platforms that support MSI-X */
		struct {
			/** @irq.msix.nvec: number of MSI-X interrupts */
			u16 nvec;
			/** @irq.msix.indexes: used to allocate MSI-X indexes */
			struct xarray indexes;
		} msix;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @mmio.size: size of MMIO space for device */
		size_t size;
		/** @mmio.regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @mem.vram: VRAM info for device */
		struct xe_vram_region *vram;
		/** @mem.sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
		/** @mem.shrinker: system memory shrinker. */
		struct xe_shrinker *shrinker;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;

		union {
			/** @sriov.pf: PF specific data */
			struct xe_device_pf pf;
			/** @sriov.vf: VF specific data */
			struct xe_device_vf vf;
		};

		/** @sriov.wq: workqueue used by the virtualization workers */
		struct workqueue_struct *wq;
	} sriov;

	/** @usm: unified memory state */
	struct {
		/** @usm.asid_to_vm: map an ASID to its VM */
		struct xarray asid_to_vm;
		/** @usm.next_asid: next ASID, used for cyclic ASID allocation */
		u32 next_asid;
		/** @usm.lock: protects USM state */
		struct rw_semaphore lock;
	} usm;
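
	/*
	 * Illustrative sketch, an assumption rather than a quote of the
	 * driver's actual code: @usm.next_asid pairs naturally with
	 * xa_alloc_cyclic() so ASIDs are handed out round-robin below
	 * XE_MAX_ASID.
	 *
	 *	err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
	 *			      XA_LIMIT(1, XE_MAX_ASID - 1),
	 *			      &xe->usm.next_asid, GFP_KERNEL);
	 */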

	/** @pinned: pinned BO state */
	struct {
		/** @pinned.lock: protects pinned BO list state */
		spinlock_t lock;
		/** @pinned.early: early pinned lists */
		struct {
			/** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
			struct list_head kernel_bo_present;
			/** @pinned.early.evicted: pinned BO that have been evicted */
			struct list_head evicted;
		} early;
		/** @pinned.late: late pinned lists */
		struct {
			/** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
			struct list_head kernel_bo_present;
			/** @pinned.late.evicted: pinned BO that have been evicted */
			struct list_head evicted;
			/** @pinned.late.external: pinned external and dma-buf BOs. */
			struct list_head external;
		} late;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @preempt_fence_wq: used to serialize preempt fences */
	struct workqueue_struct *preempt_fence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work */
	struct workqueue_struct *unordered_wq;

	/** @destroy_wq: used to serialize user destroy work, e.g. exec queue destruction */
	struct workqueue_struct *destroy_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory access in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/**
		 * @mem_access.vram_userfault: Encapsulate vram_userfault
		 * related stuff
		 */
		struct {
			/**
			 * @mem_access.vram_userfault.lock: Protects access to
			 * @mem_access.vram_userfault.list. Using a mutex instead
			 * of a spinlock as the lock is applied to an entire list
			 * operation which may sleep.
			 */
			struct mutex lock;

			/**
			 * @mem_access.vram_userfault.list: List of userfaulted
			 * vram BOs, which need to release their mmap mappings
			 * in the runtime suspend path.
			 */
			struct list_head list;
		} vram_userfault;
	} mem_access;

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @pat.ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @pat.table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @pat.n_entries: Number of PAT entries */
		int n_entries;
		/** @pat.pat_ats: PAT entry for PCIe ATS responses */
		const struct xe_pat_table_entry *pat_ats;
		/** @pat.pat_pta: PAT entry for page table accesses */
		const struct xe_pat_table_entry *pat_pta;
		/** @pat.idx: PAT entry index to use for a given cache level */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @d3cold.capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/**
		 * @d3cold.vram_threshold:
		 *
		 * The permissible threshold (in megabytes) for VRAM
		 * save/restore. d3cold will be disallowed when VRAM usage is
		 * at or above this threshold, to avoid the VRAM save/restore
		 * latency. The default threshold value is 300 MB.
		 */
		u32 vram_threshold;
		/** @d3cold.lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;
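
	/*
	 * Illustrative sketch, a hypothetical policy helper rather than the
	 * driver's actual code, showing how the threshold above could gate
	 * d3cold:
	 *
	 *	mutex_lock(&xe->d3cold.lock);
	 *	xe->d3cold.allowed = xe->d3cold.capable &&
	 *			     vram_used_mb < xe->d3cold.vram_threshold;
	 *	mutex_unlock(&xe->d3cold.lock);
	 */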

	/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
	struct notifier_block pm_notifier;
	/** @pm_block: Completion to block validating tasks on suspend / hibernate prepare */
	struct completion pm_block;
	/** @rebind_resume_list: List of wq items to kick on resume. */
	struct list_head rebind_resume_list;
	/** @rebind_resume_lock: Lock to protect the rebind_resume_list */
	struct mutex rebind_resume_lock;

	/** @pmt: Support the PMT driver callback interface */
	struct {
		/** @pmt.lock: protect access for telemetry data */
		struct mutex lock;
	} pmt;

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @nvm: discrete graphics non-volatile memory */
	struct intel_dg_nvm_dev *nvm;

	/** @late_bind: xe mei late bind interface */
	struct xe_late_bind late_bind;

	/** @oa: oa observation subsystem */
	struct xe_oa oa;

	/** @pxp: Encapsulate Protected Xe Path support */
	struct xe_pxp *pxp;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/** @wedged: Struct to control Wedged States and mode */
	struct {
		/** @wedged.flag: Xe device faced a critical error and is now blocked. */
		atomic_t flag;
		/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
		int mode;
		/** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
		unsigned long method;
	} wedged;

	/** @bo_device: Struct to control async free of BOs */
	struct xe_bo_dev {
		/** @bo_device.async_free: Free worker */
		struct work_struct async_free;
		/** @bo_device.async_list: List of BOs to be freed */
		struct llist_head async_list;
	} bo_device;
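
	/*
	 * Illustrative sketch, an assumption about the usual llist + worker
	 * pattern rather than the driver's actual free path; the BO node
	 * field name is hypothetical:
	 *
	 *	if (llist_add(&bo->freed_node, &xe->bo_device.async_list))
	 *		queue_work(system_wq, &xe->bo_device.async_free);
	 */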

	/** @pmu: performance monitoring unit */
	struct xe_pmu pmu;

	/** @i2c: I2C host controller */
	struct xe_i2c *i2c;

	/** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice in ms */
	u32 atomic_svm_timeslice_ms;

#ifdef TEST_VM_OPS_ERROR
	/**
	 * @vm_inject_error_position: inject errors at different places in VM
	 * bind IOCTL based on this value
	 */
	u8 vm_inject_error_position;
#endif

#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	/**
	 * @global_total_pages: global GPU page usage tracked for gpu_mem
	 * tracepoints
	 */
	atomic64_t global_total_pages;
#endif
	/** @val: The domain for exhaustive eviction, which is currently per device. */
	struct xe_validation_device val;

	/** @psmi: GPU debugging via additional validation HW */
	struct {
		/** @psmi.capture_obj: PSMI buffer for VRAM */
		struct xe_bo *capture_obj[XE_MAX_TILES_PER_DEVICE + 1];
		/** @psmi.region_mask: Mask of valid memory regions */
		u8 region_mask;
	} psmi;

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	/** @g2g_test_array: for testing G2G communications */
	u32 *g2g_test_array;
	/** @g2g_test_count: for testing G2G communications */
	atomic_t g2g_test_count;
#endif

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs.
	 */
	struct intel_display *display;

	const struct dram_info *dram_info;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/* To shut up runtime pm macros.. */
	struct xe_runtime_pm {} runtime_pm;

	/* only to allow build, not used functionally */
	u32 irq_mask;

	struct intel_uncore {
		spinlock_t lock;
	} uncore;

	/* only to allow build, not used functionally */
	struct {
		unsigned int hpll_freq;
		unsigned int czclk_freq;
	};
#endif
};

/**
 * struct xe_file - file handle for XE driver
 */
struct xe_file {
	/** @xe: xe device */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @vm.xa: xarray to store VMs */
		struct xarray xa;
		/**
		 * @vm.lock: Protects VM lookup + reference and removal from
		 * file xarray. Not intended to be an outer lock which does
		 * things while being held.
		 */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @exec_queue.xa: xarray to store exec queues */
		struct xarray xa;
		/**
		 * @exec_queue.lock: Protects exec queue lookup + reference and
		 * removal from file xarray. Not intended to be an outer lock
		 * which does things while being held.
		 */
		struct mutex lock;
		/**
		 * @exec_queue.pending_removal: items pending to be removed to
		 * synchronize GPU state update with ongoing query.
		 */
		atomic_t pending_removal;
	} exec_queue;

	/** @run_ticks: hw engine class run time in ticks for this drm client */
	u64 run_ticks[XE_ENGINE_CLASS_MAX];

	/** @client: drm client */
	struct xe_drm_client *client;

	/**
	 * @process_name: process name for file handle, used to safely output
	 * during error situations where xe file can outlive process
	 */
	char *process_name;

	/**
	 * @pid: pid for file handle, used to safely output during error
	 * situations where xe file can outlive process
	 */
	pid_t pid;

	/** @refcount: ref count of this xe file */
	struct kref refcount;
};
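
/*
 * Illustrative sketch, assuming the usual kref pattern for @refcount; the
 * release callback name is hypothetical:
 *
 *	kref_get(&xef->refcount);			// take a reference
 *	kref_put(&xef->refcount, xe_file_release);	// drop, free on zero
 */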

#endif