/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */

#ifndef _XE_DEVICE_TYPES_H_
#define _XE_DEVICE_TYPES_H_

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>

#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
#include "xe_sriov_vf_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_ttm_vram_mgr_types.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#endif

struct dram_info;
struct intel_display;
struct intel_dg_nvm_dev;
struct xe_ggtt;
struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;

#define XE_BO_INVALID_OFFSET	LONG_MAX

#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
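/*
 * Usage sketch (illustrative only, not part of the driver): gating
 * behaviour on IP version with the helpers above. The function name and
 * version cutoffs below are hypothetical.
 *
 *	static bool hypothetical_has_new_feature(struct xe_device *xe)
 *	{
 *		return GRAPHICS_VERx100(xe) >= 1270 || MEDIA_VER(xe) >= 20;
 *	}
 */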

#define XE_VRAM_FLAGS_NEED64K		BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

#define XE_MAX_ASID	(BIT(20))

#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&			\
	 (_xe)->info.step.graphics >= (min_step) &&		\
	 (_xe)->info.step.graphics < (max_step))
#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&				\
	 (_xe)->info.subplatform == (sub) &&				\
	 (_xe)->info.step.graphics >= (min_step) &&			\
	 (_xe)->info.step.graphics < (max_step))
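/*
 * Usage sketch (illustrative): these macros treat min_step as inclusive
 * and max_step as exclusive, so a workaround limited to early steppings
 * of a platform would look like the following (the workaround function
 * is hypothetical):
 *
 *	if (IS_PLATFORM_STEP(xe, XE_DG2, STEP_A0, STEP_B0))
 *		apply_hypothetical_a0_workaround(xe);
 */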

#define tile_to_xe(tile__)								\
	_Generic(tile__,								\
		 const struct xe_tile * : (const struct xe_device *)((tile__)->xe),	\
		 struct xe_tile * : (tile__)->xe)
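/*
 * tile_to_xe() is implemented with _Generic so that const-ness of the
 * tile pointer is preserved in the returned device pointer. A minimal
 * sketch of the const flavour:
 *
 *	static u8 tile_count_of(const struct xe_tile *tile)
 *	{
 *		const struct xe_device *xe = tile_to_xe(tile);
 *
 *		return xe->info.tile_count;
 *	}
 */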

/**
 * struct xe_vram_region - memory region structure
 * This is used to describe a memory region in an xe
 * device, such as HBM memory or CXL extension memory.
 */
struct xe_vram_region {
	/** @io_start: IO start address of this VRAM instance */
	resource_size_t io_start;
	/**
	 * @io_size: IO size of this VRAM instance
	 *
	 * This represents how much of this VRAM we can access
	 * via the CPU through the VRAM BAR. This can be smaller
	 * than @usable_size, in which case only part of VRAM is CPU
	 * accessible (typically the first 256M). This
	 * configuration is known as small-bar.
	 */
	resource_size_t io_size;
	/** @dpa_base: This memory region's DPA (device physical address) base */
	resource_size_t dpa_base;
	/**
	 * @usable_size: usable size of VRAM
	 *
	 * Usable size of VRAM excluding reserved portions
	 * (e.g. stolen memory)
	 */
	resource_size_t usable_size;
	/**
	 * @actual_physical_size: Actual VRAM size
	 *
	 * Actual VRAM size including reserved portions
	 * (e.g. stolen memory)
	 */
	resource_size_t actual_physical_size;
	/** @mapping: pointer to VRAM mappable space */
	void __iomem *mapping;
	/** @ttm: VRAM TTM manager */
	struct xe_ttm_vram_mgr ttm;
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
	/** @pagemap: Used to remap device memory as ZONE_DEVICE */
	struct dev_pagemap pagemap;
	/**
	 * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
	 * pages of this tile.
	 */
	struct drm_pagemap dpagemap;
	/**
	 * @hpa_base: base host physical address
	 *
	 * This is generated when remapping device memory as ZONE_DEVICE
	 */
	resource_size_t hpa_base;
#endif
};
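/*
 * A region is "small-bar" when only part of it is CPU-accessible, i.e.
 * when @io_size is smaller than @usable_size. A minimal sketch (the
 * helper name is hypothetical):
 *
 *	static bool hypothetical_is_small_bar(const struct xe_vram_region *vram)
 *	{
 *		return vram->io_size < vram->usable_size;
 *	}
 */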

/**
 * struct xe_mmio - register mmio structure
 *
 * Represents an MMIO region that the CPU may use to access registers.  A
 * region may share its IO map with other regions (e.g., all GTs within a
 * tile share the same map with their parent tile, but represent different
 * subregions of the overall IO space).
 */
struct xe_mmio {
	/** @tile: Backpointer to tile, used for tracing */
	struct xe_tile *tile;

	/** @regs: Map used to access registers. */
	void __iomem *regs;

	/**
	 * @sriov_vf_gt: Backpointer to GT.
	 *
	 * This pointer is only set for GT MMIO regions, and only when
	 * running as an SR-IOV VF.
	 */
	struct xe_gt *sriov_vf_gt;

	/**
	 * @regs_size: Length of the register region within the map.
	 *
	 * The size of the iomap set in *regs is generally larger than the
	 * register mmio space since it includes unused regions and/or
	 * non-register regions such as the GGTT PTEs.
	 */
	size_t regs_size;

	/** @adj_limit: adjust MMIO address if address is below this value */
	u32 adj_limit;

	/** @adj_offset: offset to add to MMIO address when adjusting */
	u32 adj_offset;
};
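/*
 * Sketch of how @adj_limit and @adj_offset combine before a register
 * access (illustrative; the real accessors live in the MMIO helpers):
 *
 *	static u32 hypothetical_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
 *	{
 *		if (addr < mmio->adj_limit)
 *			addr += mmio->adj_offset;
 *		return addr;
 *	}
 */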

/**
 * struct xe_tile - hardware tile structure
 *
 * From a driver perspective, a "tile" is effectively a complete GPU, containing
 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
 *
 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
 * device and designate one "root" tile as being responsible for external PCI
 * communication.  PCI BAR0 exposes the GGTT and MMIO register space for each
 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
 * with each tile similarly.  Device-wide interrupts can be enabled/disabled
 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
 * have interrupts that need servicing.
 */
struct xe_tile {
	/** @xe: Backpointer to tile's PCI device */
	struct xe_device *xe;

	/** @id: ID of the tile */
	u8 id;

	/**
	 * @primary_gt: Primary GT
	 */
	struct xe_gt *primary_gt;

	/**
	 * @media_gt: Media GT
	 *
	 * Only present on devices with media version >= 13.
	 */
	struct xe_gt *media_gt;

	/**
	 * @mmio: MMIO info for a tile.
	 *
	 * Each tile has its own 16MB space in BAR0, laid out as:
	 * * 0-4MB: registers
	 * * 4MB-8MB: reserved
	 * * 8MB-16MB: global GTT
	 */
	struct xe_mmio mmio;

	/** @mem: memory management info for tile */
	struct {
		/**
		 * @mem.vram: VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_vram_region vram;

		/** @mem.ggtt: Global graphics translation table */
		struct xe_ggtt *ggtt;

		/**
		 * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
		 *
		 * Media GT shares a pool with its primary GT.
		 */
		struct xe_sa_manager *kernel_bb_pool;
	} mem;

	/** @sriov: tile level virtualization data */
	union {
		struct {
			/** @sriov.pf.lmtt: Local Memory Translation Table. */
			struct xe_lmtt lmtt;
		} pf;
		struct {
			/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
			struct xe_ggtt_node *ggtt_balloon[2];
		} vf;
	} sriov;

	/** @memirq: Memory Based Interrupts. */
	struct xe_memirq memirq;

	/** @pcode: tile's PCODE */
	struct {
		/** @pcode.lock: protecting tile's PCODE mailbox data */
		struct mutex lock;
	} pcode;

	/** @migrate: Migration helper for vram blits and clearing */
	struct xe_migrate *migrate;

	/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
	struct kobject *sysfs;
};
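/*
 * Tiles are stored in a fixed array in struct xe_device and counted by
 * xe->info.tile_count, so a plain indexed loop visits each tile (the
 * driver provides iterator macros of this shape elsewhere):
 *
 *	for (u8 id = 0; id < xe->info.tile_count; id++) {
 *		struct xe_tile *tile = &xe->tiles[id];
 *
 *		...
 *	}
 */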

/**
 * struct xe_device - Top level struct of XE device
 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @info.platform_name: platform name */
		const char *platform_name;
		/** @info.graphics_name: graphics IP name */
		const char *graphics_name;
		/** @info.media_name: media IP name */
		const char *media_name;
		/** @info.graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @info.media_verx100: media IP version */
		u32 media_verx100;
		/** @info.mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @info.platform: XE platform enum */
		enum xe_platform platform;
		/** @info.subplatform: XE subplatform enum */
		enum xe_subplatform subplatform;
		/** @info.devid: device ID */
		u16 devid;
		/** @info.revid: device revision */
		u8 revid;
		/** @info.step: stepping information for each IP */
		struct xe_step_info step;
		/** @info.dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @info.vram_flags: Vram flags */
		u8 vram_flags;
		/** @info.tile_count: Number of tiles */
		u8 tile_count;
		/** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
		u8 max_gt_per_tile;
		/** @info.gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @info.vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @info.va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/*
		 * Keep all flags below alphabetically sorted
		 */

		/** @info.force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @info.has_asid: Has address space ID */
		u8 has_asid:1;
		/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
		u8 has_atomic_enable_pte_bit:1;
		/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
		u8 has_device_atomics_on_smem:1;
		/** @info.has_fan_control: Device supports fan control */
		u8 has_fan_control:1;
		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @info.has_gsc_nvm: Device has gsc non-volatile memory */
		u8 has_gsc_nvm:1;
		/** @info.has_heci_cscfi: device has heci cscfi */
		u8 has_heci_cscfi:1;
		/** @info.has_heci_gscfi: device has heci gscfi */
		u8 has_heci_gscfi:1;
		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/** @info.has_mbx_power_limits: Device has support to manage power limits using
		 * pcode mailbox commands.
		 */
		u8 has_mbx_power_limits:1;
		/** @info.has_pxp: Device has PXP support */
		u8 has_pxp:1;
		/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
		u8 has_range_tlb_invalidation:1;
		/** @info.has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @info.has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
		u8 has_64bit_timestamp:1;
		/** @info.is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @info.needs_scratch: needs scratch page for oob prefetch to work */
		u8 needs_scratch:1;
		/**
		 * @info.probe_display: Probe display hardware.  If set to
		 * false, the driver will behave as if there is no display
		 * hardware present and will not try to read/write to it in any
		 * way.  The display hardware, if it exists, will not be
		 * exposed to userspace and will be left untouched in whatever
		 * state the firmware or bootloader left it in.
		 */
		u8 probe_display:1;
		/** @info.skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;
		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @info.skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
	} info;

	/** @wa_active: keep track of active workarounds */
	struct {
		/** @wa_active.oob: bitmap with active OOB workarounds */
		unsigned long *oob;

		/**
		 * @wa_active.oob_initialized: Mark oob as initialized to help detecting misuse
		 * of XE_DEVICE_WA() - it can only be called on initialization after
		 * Device OOB WAs have been processed.
		 */
		bool oob_initialized;
	} wa_active;

	/** @survivability: survivability information for device */
	struct xe_survivability survivability;

	/** @irq: device interrupt state */
	struct {
		/** @irq.lock: lock for processing irqs on this device */
		spinlock_t lock;

		/** @irq.enabled: interrupts enabled on this device */
		atomic_t enabled;

		/** @irq.msix: irq info for platforms that support MSI-X */
		struct {
			/** @irq.msix.nvec: number of MSI-X interrupts */
			u16 nvec;
			/** @irq.msix.indexes: used to allocate MSI-X indexes */
			struct xarray indexes;
		} msix;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @mmio.size: size of MMIO space for device */
		size_t size;
		/** @mmio.regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @mem.vram: VRAM info for device */
		struct xe_vram_region vram;
		/** @mem.sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
		/** @mem.shrinker: system memory shrinker. */
		struct xe_shrinker *shrinker;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;

		union {
			/** @sriov.pf: PF specific data */
			struct xe_device_pf pf;
			/** @sriov.vf: VF specific data */
			struct xe_device_vf vf;
		};

		/** @sriov.wq: workqueue used by the virtualization workers */
		struct workqueue_struct *wq;
	} sriov;
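/*
 * @sriov.__mode is name-mangled on purpose: callers are expected to go
 * through an accessor rather than read it directly. A sketch of the
 * kind of helper involved (the real accessor lives in the SR-IOV
 * headers):
 *
 *	static inline enum xe_sriov_mode hypothetical_sriov_mode(struct xe_device *xe)
 *	{
 *		return xe->sriov.__mode;
 *	}
 */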

	/** @usm: unified memory state */
	struct {
		/** @usm.asid_to_vm: mapping from ASID to VM */
		struct xarray asid_to_vm;
		/** @usm.next_asid: next ASID, used for cyclic ASID allocation */
		u32 next_asid;
		/** @usm.lock: protects USM state */
		struct rw_semaphore lock;
	} usm;
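/*
 * @usm.next_asid enables cyclic ASID allocation out of @usm.asid_to_vm,
 * for which the xarray API has a ready-made helper. A minimal sketch,
 * assuming @vm is the value to store and the caller holds @usm.lock:
 *
 *	u32 asid;
 *	int err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
 *				  XA_LIMIT(1, XE_MAX_ASID - 1),
 *				  &xe->usm.next_asid, GFP_KERNEL);
 *	if (err < 0)
 *		return err;
 */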

	/** @pinned: pinned BO state */
	struct {
		/** @pinned.lock: protects pinned BO list state */
		spinlock_t lock;
		/** @pinned.early: early pinned lists */
		struct {
			/** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
			struct list_head kernel_bo_present;
			/** @pinned.early.evicted: pinned BO that have been evicted */
			struct list_head evicted;
		} early;
		/** @pinned.late: late pinned lists */
		struct {
			/** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
			struct list_head kernel_bo_present;
			/** @pinned.late.evicted: pinned BO that have been evicted */
			struct list_head evicted;
			/** @pinned.late.external: pinned external and dma-buf BOs. */
			struct list_head external;
		} late;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @preempt_fence_wq: used to serialize preempt fences */
	struct workqueue_struct *preempt_fence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work, mostly display */
	struct workqueue_struct *unordered_wq;

	/** @destroy_wq: used to serialize user destroy work, like queue */
	struct workqueue_struct *destroy_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory accesses in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/**
		 * @mem_access.vram_userfault: Encapsulate vram_userfault
		 * related stuff
		 */
		struct {
			/**
			 * @mem_access.vram_userfault.lock: Protects access to
			 * @mem_access.vram_userfault.list. A mutex is used instead
			 * of a spinlock since the lock covers entire list
			 * operations, which may sleep
			 */
			struct mutex lock;

			/**
			 * @mem_access.vram_userfault.list: List of userfaulted
			 * VRAM BOs whose mmap mappings must be released in the
			 * runtime suspend path
			 */
			struct list_head list;
		} vram_userfault;
	} mem_access;

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @pat.ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @pat.table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @pat.n_entries: Number of PAT entries */
		int n_entries;
		/** @pat.pat_ats: PAT entry for PCIe ATS responses */
		const struct xe_pat_table_entry *pat_ats;
		/** @pat.pat_pta: PAT entry for page table accesses */
		const struct xe_pat_table_entry *pat_pta;
		/** @pat.idx: PAT index for each cache level */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @d3cold.capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/**
		 * @d3cold.vram_threshold:
		 *
		 * This represents the permissible threshold (in megabytes)
		 * for VRAM save/restore. d3cold will be disallowed when VRAM
		 * usage is at or above this threshold, to avoid VRAM
		 * save/restore latency. The default threshold is 300 MB.
		 */
		u32 vram_threshold;
		/** @d3cold.lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;
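/*
 * Sketch of the @d3cold.vram_threshold policy (illustrative;
 * vram_used_mb stands in for the driver's real VRAM accounting):
 *
 *	mutex_lock(&xe->d3cold.lock);
 *	xe->d3cold.allowed = xe->d3cold.capable &&
 *			     vram_used_mb < xe->d3cold.vram_threshold;
 *	mutex_unlock(&xe->d3cold.lock);
 */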

	/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
	struct notifier_block pm_notifier;

	/** @pmt: Support the PMT driver callback interface */
	struct {
		/** @pmt.lock: protect access for telemetry data */
		struct mutex lock;
	} pmt;

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @nvm: discrete graphics non-volatile memory */
	struct intel_dg_nvm_dev *nvm;

	/** @oa: oa observation subsystem */
	struct xe_oa oa;

	/** @pxp: Encapsulate Protected Xe Path support */
	struct xe_pxp *pxp;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/** @wedged: Struct to control Wedged States and mode */
	struct {
		/** @wedged.flag: Xe device faced a critical error and is now blocked. */
		atomic_t flag;
		/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
		int mode;
	} wedged;

	/** @bo_device: Struct to control async free of BOs */
	struct xe_bo_dev {
		/** @bo_device.async_free: Free worker */
		struct work_struct async_free;
		/** @bo_device.async_list: List of BOs to be freed */
		struct llist_head async_list;
	} bo_device;
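/*
 * @bo_device follows the usual llist producer/worker pattern: any
 * context may push a BO onto @bo_device.async_list, and the first
 * pusher schedules the worker. A minimal sketch (the BO member name is
 * hypothetical):
 *
 *	if (llist_add(&bo->freed, &xe->bo_device.async_list))
 *		schedule_work(&xe->bo_device.async_free);
 */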

	/** @pmu: performance monitoring unit */
	struct xe_pmu pmu;

	/** @i2c: I2C host controller */
	struct xe_i2c *i2c;

	/** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */
	u32 atomic_svm_timeslice_ms;

#ifdef TEST_VM_OPS_ERROR
	/**
	 * @vm_inject_error_position: inject errors at different places in VM
	 * bind IOCTL based on this value
	 */
	u8 vm_inject_error_position;
#endif

#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	/**
	 * @global_total_pages: global GPU page usage tracked for gpu_mem
	 * tracepoints
	 */
	atomic64_t global_total_pages;
#endif

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs
	 */
	struct intel_display *display;

	const struct dram_info *dram_info;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/* To shut up runtime pm macros.. */
	struct xe_runtime_pm {} runtime_pm;

	/* only to allow build, not used functionally */
	u32 irq_mask;

	struct intel_uncore {
		spinlock_t lock;
	} uncore;

	/* only to allow build, not used functionally */
	struct {
		unsigned int hpll_freq;
		unsigned int czclk_freq;
		unsigned int fsb_freq, mem_freq, is_ddr3;
	};
#endif
};

/**
 * struct xe_file - file handle for XE driver
 */
struct xe_file {
	/** @xe: xe device */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @vm.xa: xarray to store VMs */
		struct xarray xa;
		/**
		 * @vm.lock: Protects VM lookup + reference and removal from
		 * file xarray. Not intended to be an outer lock which does
		 * things while being held.
		 */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @exec_queue.xa: xarray to store exec queues */
		struct xarray xa;
		/**
		 * @exec_queue.lock: Protects exec queue lookup + reference and
		 * removal from file xarray. Not intended to be an outer lock
		 * which does things while being held.
		 */
		struct mutex lock;
		/**
		 * @exec_queue.pending_removal: number of items pending removal,
		 * used to synchronize GPU state updates with ongoing queries.
		 */
		atomic_t pending_removal;
	} exec_queue;

	/** @run_ticks: hw engine class run time in ticks for this drm client */
	u64 run_ticks[XE_ENGINE_CLASS_MAX];

	/** @client: drm client */
	struct xe_drm_client *client;

	/**
	 * @process_name: process name for file handle, used for safe output
	 * in error situations where the xe file can outlive the process
	 */
	char *process_name;

	/**
	 * @pid: pid for file handle, used for safe output during error
	 * situations where the xe file can outlive the process
	 */
	pid_t pid;

	/** @refcount: ref count of this xe file */
	struct kref refcount;
};
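/*
 * Lifetime sketch for @refcount: an xe_file can outlive its process, so
 * users take and drop references with the standard kref helpers (the
 * release callback name below is hypothetical):
 *
 *	kref_get(&xef->refcount);
 *	...
 *	kref_put(&xef->refcount, hypothetical_xe_file_destroy);
 */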

#endif