/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */

#ifndef _XE_DEVICE_TYPES_H_
#define _XE_DEVICE_TYPES_H_

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_device.h>

#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
#include "xe_late_bind_fw_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
#include "xe_pagefault_types.h"
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
#include "xe_sriov_vf_types.h"
#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
#include "xe_tile_sriov_vf_types.h"
#include "xe_validation.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#endif

struct dram_info;
struct intel_display;
struct intel_dg_nvm_dev;
struct xe_ggtt;
struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
struct xe_vram_region;

#define XE_BO_INVALID_OFFSET	LONG_MAX

#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)

#define XE_VRAM_FLAGS_NEED64K	BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

#define XE_MAX_ASID	(BIT(20))

#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&			\
	 (_xe)->info.step.graphics >= (min_step) &&		\
	 (_xe)->info.step.graphics < (max_step))
#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&				\
	 (_xe)->info.subplatform == (sub) &&				\
	 (_xe)->info.step.graphics >= (min_step) &&			\
	 (_xe)->info.step.graphics < (max_step))

#define tile_to_xe(tile__)							\
	_Generic(tile__,							\
		 const struct xe_tile * : (const struct xe_device *)((tile__)->xe),	\
		 struct xe_tile * : (tile__)->xe)

/**
 * struct xe_mmio - register mmio structure
 *
 * Represents an MMIO region that the CPU may use to access registers. A
 * region may share its IO map with other regions (e.g., all GTs within a
 * tile share the same map with their parent tile, but represent different
 * subregions of the overall IO space).
 */
struct xe_mmio {
	/** @tile: Backpointer to tile, used for tracing */
	struct xe_tile *tile;

	/** @regs: Map used to access registers. */
	void __iomem *regs;

	/**
	 * @sriov_vf_gt: Backpointer to GT.
	 *
	 * This pointer is only set for GT MMIO regions and only when running
	 * as an SR-IOV VF.
	 */
	struct xe_gt *sriov_vf_gt;

	/**
	 * @regs_size: Length of the register region within the map.
	 *
	 * The size of the iomap set in *regs is generally larger than the
	 * register mmio space since it includes unused regions and/or
	 * non-register regions such as the GGTT PTEs.
	 */
	size_t regs_size;

	/** @adj_limit: adjust MMIO address if address is below this value */
	u32 adj_limit;

	/** @adj_offset: offset to add to MMIO address when adjusting */
	u32 adj_offset;
};
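
/*
 * Illustrative sketch (not a driver helper): @adj_limit/@adj_offset describe a
 * simple address fixup that an MMIO accessor can apply before dereferencing
 * @regs. Register offsets below the limit are shifted up by the offset;
 * everything else is used as-is:
 *
 *	u32 addr = reg_offset;
 *
 *	if (addr < mmio->adj_limit)
 *		addr += mmio->adj_offset;
 *
 *	val = readl(mmio->regs + addr);
 */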

/**
 * struct xe_tile - hardware tile structure
 *
 * From a driver perspective, a "tile" is effectively a complete GPU, containing
 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
 *
 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
 * device and designate one "root" tile as being responsible for external PCI
 * communication. PCI BAR0 exposes the GGTT and MMIO register space for each
 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
 * with each tile similarly. Device-wide interrupts can be enabled/disabled
 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
 * have interrupts that need servicing.
 */
struct xe_tile {
	/** @xe: Backpointer to tile's PCI device */
	struct xe_device *xe;

	/** @id: ID of the tile */
	u8 id;

	/**
	 * @primary_gt: Primary GT
	 */
	struct xe_gt *primary_gt;

	/**
	 * @media_gt: Media GT
	 *
	 * Only present on devices with media version >= 13.
	 */
	struct xe_gt *media_gt;

	/**
	 * @mmio: MMIO info for a tile.
	 *
	 * Each tile has its own 16MB space in BAR0, laid out as:
	 * * 0-4MB: registers
	 * * 4MB-8MB: reserved
	 * * 8MB-16MB: global GTT
	 */
	struct xe_mmio mmio;

	/** @mem: memory management info for tile */
	struct {
		/**
		 * @mem.kernel_vram: kernel-dedicated VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_vram_region *kernel_vram;

		/**
		 * @mem.vram: general purpose VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_vram_region *vram;

		/** @mem.ggtt: Global graphics translation table */
		struct xe_ggtt *ggtt;

		/**
		 * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
		 *
		 * Media GT shares a pool with its primary GT.
		 */
		struct xe_sa_manager *kernel_bb_pool;
	} mem;

	/** @sriov: tile level virtualization data */
	union {
		struct {
			/** @sriov.pf.lmtt: Local Memory Translation Table. */
			struct xe_lmtt lmtt;
		} pf;
		struct {
			/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
			struct xe_ggtt_node *ggtt_balloon[2];
			/** @sriov.vf.self_config: VF configuration data */
			struct xe_tile_sriov_vf_selfconfig self_config;
		} vf;
	} sriov;

	/** @memirq: Memory Based Interrupts. */
	struct xe_memirq memirq;

	/** @csc_hw_error_work: worker to report CSC HW errors */
	struct work_struct csc_hw_error_work;

	/** @pcode: tile's PCODE */
	struct {
		/** @pcode.lock: protecting tile's PCODE mailbox data */
		struct mutex lock;
	} pcode;

	/** @migrate: Migration helper for vram blits and clearing */
	struct xe_migrate *migrate;

	/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
	struct kobject *sysfs;

	/** @debugfs: debugfs directory associated with this tile */
	struct dentry *debugfs;
};
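
/*
 * Illustrative sketch only: tiles are stored inline in &xe_device.tiles and
 * counted by &xe_device.info.tile_count, so walking every tile and its
 * primary GT boils down to something like:
 *
 *	for (u8 id = 0; id < xe->info.tile_count; id++) {
 *		struct xe_tile *tile = &xe->tiles[id];
 *		struct xe_gt *gt = tile->primary_gt;
 *
 *		...
 *	}
 *
 * Driver code normally goes through dedicated iterator helpers rather than
 * open-coding this loop; the above is only meant to show how the structures
 * relate.
 */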

/**
 * struct xe_device - Top level struct of Xe device
 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/** @display: display device data, must be placed after drm device member */
	struct intel_display *display;
#endif

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @info.platform_name: platform name */
		const char *platform_name;
		/** @info.graphics_name: graphics IP name */
		const char *graphics_name;
		/** @info.media_name: media IP name */
		const char *media_name;
		/** @info.graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @info.media_verx100: media IP version */
		u32 media_verx100;
		/** @info.mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @info.platform: Xe platform enum */
		enum xe_platform platform;
		/** @info.subplatform: Xe subplatform enum */
		enum xe_subplatform subplatform;
		/** @info.devid: device ID */
		u16 devid;
		/** @info.revid: device revision */
		u8 revid;
		/** @info.step: stepping information for each IP */
		struct xe_step_info step;
		/** @info.dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @info.vram_flags: VRAM flags */
		u8 vram_flags;
		/** @info.tile_count: Number of tiles */
		u8 tile_count;
		/** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */
		u8 max_gt_per_tile;
		/** @info.gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @info.vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @info.va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/*
		 * Keep all flags below alphabetically sorted
		 */

		/** @info.force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @info.has_asid: Has address space ID */
		u8 has_asid:1;
		/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
		u8 has_atomic_enable_pte_bit:1;
		/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
		u8 has_device_atomics_on_smem:1;
		/** @info.has_fan_control: Device supports fan control */
		u8 has_fan_control:1;
		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @info.has_gsc_nvm: Device has GSC non-volatile memory */
		u8 has_gsc_nvm:1;
		/** @info.has_heci_cscfi: Device has HECI CSCFI */
		u8 has_heci_cscfi:1;
		/** @info.has_heci_gscfi: Device has HECI GSCFI */
		u8 has_heci_gscfi:1;
		/** @info.has_late_bind: Device has firmware late binding support */
		u8 has_late_bind:1;
		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/**
		 * @info.has_mbx_power_limits: Device has support to manage
		 * power limits using pcode mailbox commands.
		 */
		u8 has_mbx_power_limits:1;
		/** @info.has_mem_copy_instr: Device supports MEM_COPY instruction */
		u8 has_mem_copy_instr:1;
		/** @info.has_pxp: Device has PXP support */
		u8 has_pxp:1;
		/** @info.has_range_tlb_inval: Has range based TLB invalidations */
		u8 has_range_tlb_inval:1;
		/** @info.has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @info.has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
		u8 has_64bit_timestamp:1;
		/** @info.is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @info.needs_scratch: needs scratch page for oob prefetch to work */
		u8 needs_scratch:1;
		/**
		 * @info.probe_display: Probe display hardware. If set to
		 * false, the driver will behave as if there is no display
		 * hardware present and will not try to read/write to it in any
		 * way. The display hardware, if it exists, will not be
		 * exposed to userspace and will be left untouched in whatever
		 * state the firmware or bootloader left it in.
		 */
		u8 probe_display:1;
		/** @info.skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;
		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @info.skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
		/** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */
		u8 needs_shared_vf_gt_wq:1;
	} info;
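
	/*
	 * Note on the version fields above: @info.graphics_verx100 and
	 * @info.media_verx100 hold the IP version multiplied by 100 (e.g.
	 * 12.70 is stored as 1270), which is what GRAPHICS_VER()/MEDIA_VER()
	 * divide back down. An illustrative check against a version range
	 * might therefore look like:
	 *
	 *	if (GRAPHICS_VERx100(xe) >= 1270 && GRAPHICS_VERx100(xe) < 2000)
	 *		...
	 */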

	/** @wa_active: keep track of active workarounds */
	struct {
		/** @wa_active.oob: bitmap with active OOB workarounds */
		unsigned long *oob;

		/**
		 * @wa_active.oob_initialized: Mark oob as initialized to help detecting misuse
		 * of XE_DEVICE_WA() - it can only be called on initialization after
		 * Device OOB WAs have been processed.
		 */
		bool oob_initialized;
	} wa_active;

	/** @survivability: survivability information for device */
	struct xe_survivability survivability;

	/** @irq: device interrupt state */
	struct {
		/** @irq.lock: lock for processing irq's on this device */
		spinlock_t lock;

		/** @irq.enabled: interrupts enabled on this device */
		atomic_t enabled;

		/** @irq.msix: irq info for platforms that support MSI-X */
		struct {
			/** @irq.msix.nvec: number of MSI-X interrupts */
			u16 nvec;
			/** @irq.msix.indexes: used to allocate MSI-X indexes */
			struct xarray indexes;
		} msix;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @mmio.size: size of MMIO space for device */
		size_t size;
		/** @mmio.regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @mem.vram: VRAM info for device */
		struct xe_vram_region *vram;
		/** @mem.sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
		/** @mem.shrinker: system memory shrinker. */
		struct xe_shrinker *shrinker;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;

		union {
			/** @sriov.pf: PF specific data */
			struct xe_device_pf pf;
			/** @sriov.vf: VF specific data */
			struct xe_device_vf vf;
		};

		/** @sriov.wq: workqueue used by the virtualization workers */
		struct workqueue_struct *wq;
	} sriov;

	/** @usm: unified memory state */
	struct {
		/** @usm.asid_to_vm: xarray mapping an ASID to its VM */
		struct xarray asid_to_vm;
		/** @usm.next_asid: next ASID, used to cyclically allocate ASIDs */
		u32 next_asid;
		/** @usm.lock: protects USM state */
		struct rw_semaphore lock;
		/** @usm.pf_wq: page fault work queue, unbound, high priority */
		struct workqueue_struct *pf_wq;
		/*
		 * We pick 4 here because, in the current implementation, it
		 * yields the best bandwidth utilization of the kernel paging
		 * engine.
		 */
#define XE_PAGEFAULT_QUEUE_COUNT	4
		/** @usm.pf_queue: Page fault queues */
		struct xe_pagefault_queue pf_queue[XE_PAGEFAULT_QUEUE_COUNT];
	} usm;
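
	/*
	 * Illustrative sketch, not a driver helper: @usm.asid_to_vm is a plain
	 * xarray, so resolving a fault's ASID back to its VM is expected to
	 * look roughly like this, with @usm.lock held:
	 *
	 *	down_read(&xe->usm.lock);
	 *	vm = xa_load(&xe->usm.asid_to_vm, asid);
	 *	if (vm)
	 *		... take a VM reference before dropping the lock ...
	 *	up_read(&xe->usm.lock);
	 *
	 * ASIDs themselves are handed out cyclically (see @usm.next_asid),
	 * e.g. with xa_alloc_cyclic(), so they wrap below XE_MAX_ASID instead
	 * of growing without bound.
	 */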

	/** @pinned: pinned BO state */
	struct {
		/** @pinned.lock: protects pinned BO list state */
		spinlock_t lock;
		/** @pinned.early: early pinned lists */
		struct {
			/** @pinned.early.kernel_bo_present: pinned kernel BOs that are present */
			struct list_head kernel_bo_present;
			/** @pinned.early.evicted: pinned BOs that have been evicted */
			struct list_head evicted;
		} early;
		/** @pinned.late: late pinned lists */
		struct {
			/** @pinned.late.kernel_bo_present: pinned kernel BOs that are present */
			struct list_head kernel_bo_present;
			/** @pinned.late.evicted: pinned BOs that have been evicted */
			struct list_head evicted;
			/** @pinned.late.external: pinned external and dma-buf BOs */
			struct list_head external;
		} late;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @preempt_fence_wq: used to serialize preempt fences */
	struct workqueue_struct *preempt_fence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work */
	struct workqueue_struct *unordered_wq;

	/** @destroy_wq: used to serialize user destroy work, like queue */
	struct workqueue_struct *destroy_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory access in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/**
		 * @mem_access.vram_userfault: Encapsulate vram_userfault
		 * related stuff
		 */
		struct {
			/**
			 * @mem_access.vram_userfault.lock: Protects access to
			 * @mem_access.vram_userfault.list. A mutex is used
			 * instead of a spinlock because the lock is held
			 * across entire list operations, which may sleep.
			 */
			struct mutex lock;

			/**
			 * @mem_access.vram_userfault.list: List of userfaulted
			 * VRAM BOs, whose mmap mappings need to be released in
			 * the runtime suspend path.
			 */
			struct list_head list;
		} vram_userfault;
	} mem_access;
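
	/*
	 * Illustrative only: @mem_access.vram_userfault exists so the runtime
	 * suspend path can revoke CPU mmaps of VRAM objects that were faulted
	 * while the device was awake. A sketch of that walk, assuming each BO
	 * carries a hypothetical vram_userfault_link list node:
	 *
	 *	mutex_lock(&xe->mem_access.vram_userfault.lock);
	 *	list_for_each_entry_safe(bo, next,
	 *				 &xe->mem_access.vram_userfault.list,
	 *				 vram_userfault_link) {
	 *		list_del_init(&bo->vram_userfault_link);
	 *		ttm_bo_unmap_virtual(&bo->ttm);
	 *	}
	 *	mutex_unlock(&xe->mem_access.vram_userfault.lock);
	 */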

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @pat.ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @pat.table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @pat.n_entries: Number of PAT entries */
		int n_entries;
		/** @pat.pat_ats: PAT entry for PCIe ATS responses */
		const struct xe_pat_table_entry *pat_ats;
		/** @pat.pat_pta: PAT entry for page table accesses */
		const struct xe_pat_table_entry *pat_pta;
		/** @pat.idx: PAT index for each xe_cache_level */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @d3cold.capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/**
		 * @d3cold.vram_threshold:
		 *
		 * This represents the permissible threshold (in megabytes)
		 * for VRAM save/restore. d3cold will be disallowed when VRAM
		 * usage is at or above this threshold, to avoid the VRAM
		 * save/restore latency.
		 * Default threshold value is 300MB.
		 */
		u32 vram_threshold;
		/** @d3cold.lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;

	/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
	struct notifier_block pm_notifier;
	/** @pm_block: Completion to block validating tasks on suspend / hibernate prepare */
	struct completion pm_block;
	/** @rebind_resume_list: List of wq items to kick on resume. */
	struct list_head rebind_resume_list;
	/** @rebind_resume_lock: Lock to protect the rebind_resume_list */
	struct mutex rebind_resume_lock;

	/** @pmt: Support the PMT driver callback interface */
	struct {
		/** @pmt.lock: protect access for telemetry data */
		struct mutex lock;
	} pmt;

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @nvm: discrete graphics non-volatile memory */
	struct intel_dg_nvm_dev *nvm;

	/** @late_bind: xe mei late bind interface */
	struct xe_late_bind late_bind;

	/** @oa: oa observation subsystem */
	struct xe_oa oa;

	/** @pxp: Encapsulate Protected Xe Path support */
	struct xe_pxp *pxp;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/** @wedged: Struct to control Wedged States and mode */
	struct {
		/** @wedged.flag: Xe device faced a critical error and is now blocked. */
		atomic_t flag;
		/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
		int mode;
		/** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
		unsigned long method;
	} wedged;
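
	/*
	 * Illustrative sketch: @wedged.flag is atomic, so "is the device
	 * wedged?" checks are expected to be plain atomic reads, e.g.:
	 *
	 *	if (atomic_read(&xe->wedged.flag))
	 *		return -ECANCELED;
	 *
	 * while @wedged.mode selects the policy (controlled via module
	 * parameter and debugfs, as noted above).
	 */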

	/** @bo_device: Struct to control async free of BOs */
	struct xe_bo_dev {
		/** @bo_device.async_free: Free worker */
		struct work_struct async_free;
		/** @bo_device.async_list: List of BOs to be freed */
		struct llist_head async_list;
	} bo_device;

	/** @pmu: performance monitoring unit */
	struct xe_pmu pmu;

	/** @i2c: I2C host controller */
	struct xe_i2c *i2c;

	/** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice in milliseconds */
	u32 atomic_svm_timeslice_ms;

#ifdef TEST_VM_OPS_ERROR
	/**
	 * @vm_inject_error_position: inject errors at different places in VM
	 * bind IOCTL based on this value
	 */
	u8 vm_inject_error_position;
#endif

#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	/**
	 * @global_total_pages: global GPU page usage tracked for gpu_mem
	 * tracepoints
	 */
	atomic64_t global_total_pages;
#endif

	/** @val: The domain for exhaustive eviction, which is currently per device. */
	struct xe_validation_device val;

	/** @psmi: GPU debugging via additional validation HW */
	struct {
		/** @psmi.capture_obj: PSMI buffer for VRAM */
		struct xe_bo *capture_obj[XE_MAX_TILES_PER_DEVICE + 1];
		/** @psmi.region_mask: Mask of valid memory regions */
		u8 region_mask;
	} psmi;

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	/** @g2g_test_array: for testing G2G communications */
	u32 *g2g_test_array;
	/** @g2g_test_count: for testing G2G communications */
	atomic_t g2g_test_count;
#endif

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs
	 */
	const struct dram_info *dram_info;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct intel_uncore {
		spinlock_t lock;
	} uncore;
#endif
};

/**
 * struct xe_file - file handle for Xe driver
 */
struct xe_file {
	/** @xe: xe device this file handle belongs to */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @vm.xa: xarray to store VMs */
		struct xarray xa;
		/**
		 * @vm.lock: Protects VM lookup + reference and removal from
		 * file xarray. Not intended to be an outer lock which does
		 * things while being held.
		 */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @exec_queue.xa: xarray to store exec queues */
		struct xarray xa;
		/**
		 * @exec_queue.lock: Protects exec queue lookup + reference and
		 * removal from file xarray. Not intended to be an outer lock
		 * which does things while being held.
		 */
		struct mutex lock;
		/**
		 * @exec_queue.pending_removal: items pending to be removed to
		 * synchronize GPU state update with ongoing query.
		 */
		atomic_t pending_removal;
	} exec_queue;
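
	/*
	 * Illustrative note on the two xarrays above: @vm.lock and
	 * @exec_queue.lock only guard lookup-plus-reference and removal, so a
	 * typical lookup (xef being a struct xe_file pointer) is expected to
	 * resolve the handle and take a reference before dropping the lock:
	 *
	 *	mutex_lock(&xef->exec_queue.lock);
	 *	q = xa_load(&xef->exec_queue.xa, id);
	 *	if (q)
	 *		... take an exec queue reference ...
	 *	mutex_unlock(&xef->exec_queue.lock);
	 */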

	/** @run_ticks: hw engine class run time in ticks for this drm client */
	u64 run_ticks[XE_ENGINE_CLASS_MAX];

	/** @client: drm client */
	struct xe_drm_client *client;

	/**
	 * @process_name: process name for file handle, used to safely output
	 * during error situations where xe file can outlive process
	 */
	char *process_name;

	/**
	 * @pid: pid for file handle, used to safely output during error
	 * situations where xe file can outlive process
	 */
	pid_t pid;

	/** @refcount: ref count of this xe file */
	struct kref refcount;
};

#endif