/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct drm_syncobj;
struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;

enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
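
/*
 * Illustrative only, not part of the driver: user-facing priorities span
 * LOW..HIGH, XE_EXEC_QUEUE_PRIORITY_KERNEL is reserved for kernel-internal
 * queues, and XE_EXEC_QUEUE_PRIORITY_UNSET is an execlist-only sentinel.
 * A minimal sketch of clamping a requested value to the user-visible range
 * (the helper name is hypothetical):
 *
 *	static enum xe_exec_queue_priority
 *	example_clamp_user_priority(int requested)
 *	{
 *		if (requested < XE_EXEC_QUEUE_PRIORITY_LOW)
 *			return XE_EXEC_QUEUE_PRIORITY_LOW;
 *		if (requested > XE_EXEC_QUEUE_PRIORITY_HIGH)
 *			return XE_EXEC_QUEUE_PRIORITY_HIGH;
 *		return requested;
 *	}
 */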

/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
	/** @xef: Back pointer to xe file if this is a user-created exec queue */
	struct xe_file *xef;

	/** @gt: GT structure this exec queue can submit to */
	struct xe_gt *gt;
	/**
	 * @hwe: A hardware engine of the same class. May (physical engine) or
	 * may not (virtual engine) be the engine jobs actually end up running
	 * on. Should never really be used for submissions.
	 */
	struct xe_hw_engine *hwe;
	/** @refcount: ref count of this exec queue */
	struct kref refcount;
	/** @vm: VM (address space) for this exec queue */
	struct xe_vm *vm;
	/**
	 * @user_vm: User VM (address space) for this exec queue (bind queues
	 * only)
	 */
	struct xe_vm *user_vm;

	/** @class: class of this exec queue */
	enum xe_engine_class class;
	/**
	 * @logical_mask: logical mask of where jobs submitted to this exec
	 * queue can run
	 */
	u32 logical_mask;
	/** @name: name of this exec queue */
	char name[MAX_FENCE_NAME_LEN];
	/** @width: width (number of BBs submitted per exec) of this exec queue */
	u16 width;
	/** @msix_vec: MSI-X vector (for platforms that support it) */
	u16 msix_vec;
	/** @fence_irq: fence IRQ used to signal job completion */
	struct xe_hw_fence_irq *fence_irq;

	/**
	 * @last_fence: last fence on exec queue, protected by vm->lock in
	 * write mode for bind exec queues and by the dma-resv lock for
	 * non-bind exec queues
	 */
	struct dma_fence *last_fence;

/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL			BIT(0)
/* kernel queue, only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT		BIT(1)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
#define EXEC_QUEUE_FLAG_VM			BIT(2)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(4)
/* flag to indicate low latency hint to GuC */
#define EXEC_QUEUE_FLAG_LOW_LATENCY		BIT(5)
/* for migration (kernel copy, clear, bind) jobs */
#define EXEC_QUEUE_FLAG_MIGRATE			BIT(6)

	/**
	 * @flags: flags for this exec queue; should be set up statically,
	 * aside from the ban bit
	 */
	unsigned long flags;
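
	/*
	 * Illustrative only (not actual driver code): queue flags are set
	 * once at creation and then only tested, e.g. a bind queue whose
	 * creator held an rpm ref can be recognized with:
	 *
	 *	if (q->flags & EXEC_QUEUE_FLAG_VM)
	 *		handle_bind_queue(q);
	 *
	 * where handle_bind_queue() is a hypothetical helper.
	 */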

	union {
		/** @multi_gt_list: list head for VM bind engines if multi-GT */
		struct list_head multi_gt_list;
		/** @multi_gt_link: link for VM bind engines if multi-GT */
		struct list_head multi_gt_link;
	};

	union {
		/** @execlist: execlist backend specific state for exec queue */
		struct xe_execlist_exec_queue *execlist;
		/** @guc: GuC backend specific state for exec queue */
		struct xe_guc_exec_queue *guc;
	};

	/** @sched_props: scheduling properties */
	struct {
		/** @sched_props.timeslice_us: timeslice period in micro-seconds */
		u32 timeslice_us;
		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
		u32 preempt_timeout_us;
		/** @sched_props.job_timeout_ms: job timeout in milliseconds */
		u32 job_timeout_ms;
		/** @sched_props.priority: priority of this exec queue */
		enum xe_exec_queue_priority priority;
	} sched_props;
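
	/*
	 * Illustrative flow only (example_set_timeslice() is hypothetical):
	 * a scheduling property change is cached in @sched_props and pushed
	 * to the submission backend through @ops:
	 *
	 *	static int example_set_timeslice(struct xe_exec_queue *q, u32 us)
	 *	{
	 *		q->sched_props.timeslice_us = us;
	 *		return q->ops->set_timeslice(q, us);
	 *	}
	 */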

	/** @lr: long-running exec queue state */
	struct {
		/** @lr.pfence: preemption fence */
		struct dma_fence *pfence;
		/** @lr.context: preemption fence context */
		u64 context;
		/** @lr.seqno: preemption fence seqno */
		u32 seqno;
		/** @lr.link: link into VM's list of exec queues */
		struct list_head link;
	} lr;

#define XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT	0
#define XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT	1
#define XE_EXEC_QUEUE_TLB_INVAL_COUNT		(XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT + 1)

	/** @tlb_inval: TLB invalidation exec queue state */
	struct {
		/**
		 * @tlb_inval.dep_scheduler: The TLB invalidation
		 * dependency scheduler
		 */
		struct xe_dep_scheduler *dep_scheduler;
		/**
		 * @tlb_inval.last_fence: last fence for TLB invalidation,
		 * protected by vm->lock in write mode
		 */
		struct dma_fence *last_fence;
	} tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];
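
	/*
	 * Illustrative only: the per-GT invalidation state is indexed with
	 * the XE_EXEC_QUEUE_TLB_INVAL_* constants, e.g. (sketch):
	 *
	 *	struct xe_dep_scheduler *dep =
	 *		q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;
	 */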

	/** @pxp: PXP info tracking */
	struct {
		/** @pxp.type: PXP session type used by this queue */
		u8 type;
		/** @pxp.link: link into the list of PXP exec queues */
		struct list_head link;
	} pxp;

	/** @ufence_syncobj: User fence syncobj */
	struct drm_syncobj *ufence_syncobj;

	/** @ufence_timeline_value: User fence timeline value */
	u64 ufence_timeline_value;

	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;

	/** @ring_ops: ring operations for this exec queue */
	const struct xe_ring_ops *ring_ops;
	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
	struct drm_sched_entity *entity;

#define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE	1000
	/** @job_cnt: number of drm jobs in this exec queue */
	atomic_t job_cnt;

	/**
	 * @tlb_flush_seqno: The seqno of the last rebind TLB flush performed.
	 * Protected by @vm's resv. Unused if @vm == NULL.
	 */
	u64 tlb_flush_seqno;
	/** @hw_engine_group_link: link into exec queues in the same hw engine group */
	struct list_head hw_engine_group_link;
	/** @lrc: logical ring context for this exec queue */
	struct xe_lrc *lrc[] __counted_by(width);
};
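
/*
 * Reference counting sketch, illustrative only: the real get/put helpers
 * live in xe_exec_queue.h, and the release callback name below is
 * hypothetical. @refcount is a plain kref, so the usual pattern applies:
 *
 *	static void example_exec_queue_release(struct kref *ref)
 *	{
 *		struct xe_exec_queue *q =
 *			container_of(ref, struct xe_exec_queue, refcount);
 *
 *		q->ops->destroy(q);
 *	}
 *
 *	kref_get(&q->refcount);
 *	kref_put(&q->refcount, example_exec_queue_release);
 */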

/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
	/** @init: Initialize exec queue for submission backend */
	int (*init)(struct xe_exec_queue *q);
	/** @kill: Kill inflight submissions for backend */
	void (*kill)(struct xe_exec_queue *q);
	/** @fini: Undoes the init() for submission backend */
	void (*fini)(struct xe_exec_queue *q);
	/**
	 * @destroy: Destroy exec queue for submission backend. The backend
	 * function must call xe_exec_queue_fini() (which will in turn call the
	 * fini() backend function) to ensure the queue is properly cleaned up.
	 */
	void (*destroy)(struct xe_exec_queue *q);
	/** @set_priority: Set priority for exec queue */
	int (*set_priority)(struct xe_exec_queue *q,
			    enum xe_exec_queue_priority priority);
	/** @set_timeslice: Set timeslice for exec queue */
	int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
	/** @set_preempt_timeout: Set preemption timeout for exec queue */
	int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
	/**
	 * @suspend: Suspend the exec queue from executing. May be called
	 * multiple times in a row before resume, with the caveat that
	 * suspend_wait must return before suspend is called again.
	 */
	int (*suspend)(struct xe_exec_queue *q);
	/**
	 * @suspend_wait: Wait for the exec queue to suspend executing; should
	 * be called after suspend. This is in the dma-fencing path and thus
	 * must return within a reasonable amount of time. A return of -ETIME
	 * indicates an error waiting for suspend, resulting in the associated
	 * VM getting killed. A return of -EAGAIN indicates the wait should be
	 * tried again; if the wait is within a work item, the work item should
	 * be requeued as a deadlock-avoidance mechanism.
	 */
	int (*suspend_wait)(struct xe_exec_queue *q);
	/**
	 * @resume: Resume exec queue execution. The exec queue must be in a
	 * suspended state, and the dma fence returned from the most recent
	 * suspend call must be signalled when this function is called.
	 */
	void (*resume)(struct xe_exec_queue *q);
	/** @reset_status: check exec queue reset status */
	bool (*reset_status)(struct xe_exec_queue *q);
};
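
/*
 * Calling-convention sketch, illustrative only: per the kernel-doc above,
 * suspend may be issued repeatedly, suspend_wait must complete (watching
 * for -ETIME/-EAGAIN) before the next suspend, and resume requires the
 * queue to actually be suspended.
 *
 *	int err = q->ops->suspend(q);
 *	if (!err)
 *		err = q->ops->suspend_wait(q);
 *	if (!err)
 *		q->ops->resume(q);
 */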

#endif