xref: /linux/drivers/gpu/drm/xe/xe_sched_job_types.h (revision deb879faa9d2f327ac5c079d9d1a1747b79260e3)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #ifndef _XE_SCHED_JOB_TYPES_H_
7 #define _XE_SCHED_JOB_TYPES_H_
8 
9 #include <linux/kref.h>
10 
11 #include <drm/gpu_scheduler.h>
12 
13 struct xe_exec_queue;
14 struct dma_fence;
15 struct dma_fence_chain;
16 
/**
 * struct xe_job_ptrs - Per hw engine instance data
 *
 * Pre-allocated per-instance state for a scheduler job. One entry exists per
 * hw engine instance in the trailing @ptrs array of struct xe_sched_job.
 * The fence objects are allocated up front so job submission does not have
 * to allocate memory.
 */
struct xe_job_ptrs {
	/** @lrc_fence: Pre-allocated uninitialized lrc fence. */
	struct dma_fence *lrc_fence;
	/** @chain_fence: Pre-allocated uninitialized fence chain node. */
	struct dma_fence_chain *chain_fence;
	/** @batch_addr: Batch buffer address. */
	u64 batch_addr;
	/**
	 * @head: The tail pointer of the LRC (so head pointer of job) when the
	 * job was submitted.
	 */
	u32 head;
};
33 
/**
 * struct xe_sched_job - Xe schedule job (batch buffer tracking)
 *
 * Wraps a &struct drm_sched_job with Xe-specific submission state: the
 * owning exec queue, a completion fence, optional user fence write-back,
 * and per-hw-engine-instance data in the trailing @ptrs array.
 */
struct xe_sched_job {
	/** @drm: base DRM scheduler job */
	struct drm_sched_job drm;
	/** @q: Exec queue this job is submitted on */
	struct xe_exec_queue *q;
	/** @refcount: ref count of this job */
	struct kref refcount;
	/**
	 * @fence: dma fence to indicate completion. 1 way relationship - job
	 * can safely reference fence, fence cannot safely reference job.
	 */
	struct dma_fence *fence;
	/** @user_fence: write back value when BB is complete */
	struct {
		/** @user_fence.used: user fence is used */
		bool used;
		/** @user_fence.addr: address to write to */
		u64 addr;
		/** @user_fence.value: write back value */
		u64 value;
	} user_fence;
	/** @lrc_seqno: LRC seqno */
	u32 lrc_seqno;
	/** @migrate_flush_flags: Additional flush flags for migration jobs */
	u32 migrate_flush_flags;
	/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
	bool ring_ops_flush_tlb;
	/** @ggtt: mapped in ggtt. */
	bool ggtt;
	/** @restore_replay: job being replayed for restore */
	bool restore_replay;
	/** @last_replay: last job being replayed */
	bool last_replay;
	/**
	 * @ptrs: per hw engine instance data; flexible array, one entry per
	 * instance the job runs on.
	 */
	struct xe_job_ptrs ptrs[];
};
73 
/**
 * struct xe_sched_job_snapshot - Captured batch buffer addresses of a job
 *
 * NOTE(review): looks like a debug/coredump capture of a job's batch buffer
 * addresses — confirm against the code that allocates and fills this.
 */
struct xe_sched_job_snapshot {
	/** @batch_addr_len: number of entries in @batch_addr */
	u16 batch_addr_len;
	/** @batch_addr: captured batch buffer addresses */
	u64 batch_addr[] __counted_by(batch_addr_len);
};
78 
79 #endif
80