/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_SCHED_JOB_TYPES_H_
#define _XE_SCHED_JOB_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

struct xe_exec_queue;
struct dma_fence;
struct dma_fence_chain;

/**
 * struct xe_job_ptrs - Per hw engine instance data
 */
struct xe_job_ptrs {
	/** @lrc_fence: Pre-allocated uninitialized LRC fence. */
	struct dma_fence *lrc_fence;
	/** @chain_fence: Pre-allocated uninitialized fence chain node. */
	struct dma_fence_chain *chain_fence;
	/** @batch_addr: Batch buffer address. */
	u64 batch_addr;
};

/**
 * struct xe_sched_job - XE schedule job (batch buffer tracking)
 */
struct xe_sched_job {
	/** @drm: base DRM scheduler job */
	struct drm_sched_job drm;
	/** @q: Exec queue */
	struct xe_exec_queue *q;
	/** @refcount: ref count of this job */
	struct kref refcount;
	/**
	 * @fence: dma fence to indicate completion. One-way relationship: the
	 * job can safely reference the fence, but the fence cannot safely
	 * reference the job.
	 */
#define JOB_FLAG_SUBMIT		DMA_FENCE_FLAG_USER_BITS
	struct dma_fence *fence;
	/** @user_fence: write back value when BB is complete */
	struct {
		/** @user_fence.used: user fence is used */
		bool used;
		/** @user_fence.addr: address to write to */
		u64 addr;
		/** @user_fence.value: write back value */
		u64 value;
	} user_fence;
	/** @lrc_seqno: LRC seqno */
	u32 lrc_seqno;
	/** @migrate_flush_flags: Additional flush flags for migration jobs */
	u32 migrate_flush_flags;
	/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
	bool ring_ops_flush_tlb;
	/** @ptrs: per instance pointers. */
	struct xe_job_ptrs ptrs[];
};

/**
 * struct xe_sched_job_snapshot - Snapshot of a job's batch buffer addresses
 */
struct xe_sched_job_snapshot {
	/** @batch_addr_len: number of entries in @batch_addr */
	u16 batch_addr_len;
	/** @batch_addr: batch buffer addresses */
	u64 batch_addr[];
};

#endif
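
/*
 * Illustrative sketch (not part of this header's API): the trailing ptrs[]
 * flexible array is sized with one struct xe_job_ptrs per hw engine instance
 * of the exec queue, so an allocation would typically use struct_size(), e.g.:
 *
 *	struct xe_sched_job *job;
 *
 *	job = kzalloc(struct_size(job, ptrs, q->width), GFP_KERNEL);
 *	if (!job)
 *		return ERR_PTR(-ENOMEM);
 *	kref_init(&job->refcount);
 *	job->q = q;
 *
 * The q->width field and the allocation flags above are assumptions for
 * illustration only; the actual allocation path lives in xe_sched_job.c.
 */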