xref: /linux/drivers/gpu/drm/xe/xe_exec_queue_types.h (revision e811c33b1f137be26a20444b79db8cbc1fca1c89)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #ifndef _XE_EXEC_QUEUE_TYPES_H_
7 #define _XE_EXEC_QUEUE_TYPES_H_
8 
9 #include <linux/kref.h>
10 
11 #include <drm/gpu_scheduler.h>
12 
13 #include "xe_gpu_scheduler_types.h"
14 #include "xe_hw_engine_types.h"
15 #include "xe_hw_fence_types.h"
16 #include "xe_lrc_types.h"
17 
18 struct drm_syncobj;
19 struct xe_execlist_exec_queue;
20 struct xe_gt;
21 struct xe_guc_exec_queue;
22 struct xe_hw_engine;
23 struct xe_vm;
24 
/**
 * enum xe_exec_queue_priority - Scheduling priority of an exec queue
 * @XE_EXEC_QUEUE_PRIORITY_UNSET: No priority assigned yet (execlist backend
 *				  usage only)
 * @XE_EXEC_QUEUE_PRIORITY_LOW: Low priority
 * @XE_EXEC_QUEUE_PRIORITY_NORMAL: Normal priority
 * @XE_EXEC_QUEUE_PRIORITY_HIGH: High priority
 * @XE_EXEC_QUEUE_PRIORITY_KERNEL: Kernel priority (highest level; see
 *				   EXEC_QUEUE_FLAG_HIGH_PRIORITY, kernel
 *				   exec queues only)
 * @XE_EXEC_QUEUE_PRIORITY_COUNT: Number of valid priority levels; UNSET is
 *				  negative and therefore excluded from the
 *				  count
 */
enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
34 
/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
	/** @xef: Back pointer to xe file if this is user created exec queue */
	struct xe_file *xef;

	/** @gt: GT structure this exec queue can submit to */
	struct xe_gt *gt;
	/**
	 * @hwe: A hardware engine of the same class. May (physical engine) or
	 * may not (virtual engine) be where jobs actually end up running.
	 * Should never really be used for submissions.
	 */
	struct xe_hw_engine *hwe;
	/** @refcount: ref count of this exec queue */
	struct kref refcount;
	/** @vm: VM (address space) for this exec queue */
	struct xe_vm *vm;
	/** @class: class of this exec queue */
	enum xe_engine_class class;
	/**
	 * @logical_mask: logical mask of where job submitted to exec queue can run
	 */
	u32 logical_mask;
	/** @name: name of this exec queue */
	char name[MAX_FENCE_NAME_LEN];
	/** @width: width (number of BBs submitted per exec) of this exec queue */
	u16 width;
	/** @msix_vec: MSI-X vector (for platforms that support it) */
	u16 msix_vec;
	/** @fence_irq: fence IRQ used to signal job completion */
	struct xe_hw_fence_irq *fence_irq;

	/**
	 * @last_fence: last fence on exec queue, protected by vm->lock in write
	 * mode if bind exec queue, protected by dma resv lock if non-bind exec
	 * queue
	 */
	struct dma_fence *last_fence;

/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL			BIT(0)
/* kernel engine only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT		BIT(1)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
#define EXEC_QUEUE_FLAG_VM			BIT(2)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(4)
/* flag to indicate low latency hint to guc */
#define EXEC_QUEUE_FLAG_LOW_LATENCY		BIT(5)
/* for migration (kernel copy, clear, bind) jobs */
#define EXEC_QUEUE_FLAG_MIGRATE			BIT(6)

	/**
	 * @flags: flags for this exec queue, should be statically set up aside
	 * from ban bit
	 */
	unsigned long flags;

	union {
		/** @multi_gt_list: list head for VM bind engines if multi-GT */
		struct list_head multi_gt_list;
		/** @multi_gt_link: link for VM bind engines if multi-GT */
		struct list_head multi_gt_link;
	};

	union {
		/** @execlist: execlist backend specific state for exec queue */
		struct xe_execlist_exec_queue *execlist;
		/** @guc: GuC backend specific state for exec queue */
		struct xe_guc_exec_queue *guc;
	};

	/** @sched_props: scheduling properties */
	struct {
		/** @sched_props.timeslice_us: timeslice period in micro-seconds */
		u32 timeslice_us;
		/** @sched_props.preempt_timeout_us: preemption timeout in micro-seconds */
		u32 preempt_timeout_us;
		/** @sched_props.job_timeout_ms: job timeout in milliseconds */
		u32 job_timeout_ms;
		/** @sched_props.priority: priority of this exec queue */
		enum xe_exec_queue_priority priority;
	} sched_props;

	/** @lr: long-running exec queue state */
	struct {
		/** @lr.pfence: preemption fence */
		struct dma_fence *pfence;
		/** @lr.context: preemption fence context */
		u64 context;
		/** @lr.seqno: preemption fence seqno */
		u32 seqno;
		/** @lr.link: link into VM's list of exec queues */
		struct list_head link;
	} lr;

#define XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT	0
#define XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT	1
#define XE_EXEC_QUEUE_TLB_INVAL_COUNT		(XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT + 1)

	/** @tlb_inval: TLB invalidations exec queue state */
	struct {
		/**
		 * @tlb_inval.dep_scheduler: The TLB invalidation
		 * dependency scheduler
		 */
		struct xe_dep_scheduler *dep_scheduler;
	} tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];

	/** @pxp: PXP info tracking */
	struct {
		/** @pxp.type: PXP session type used by this queue */
		u8 type;
		/** @pxp.link: link into the list of PXP exec queues */
		struct list_head link;
	} pxp;

	/** @ufence_syncobj: User fence syncobj */
	struct drm_syncobj *ufence_syncobj;

	/** @ufence_timeline_value: User fence timeline value */
	u64 ufence_timeline_value;

	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;

	/** @ring_ops: ring operations for this exec queue */
	const struct xe_ring_ops *ring_ops;
	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
	struct drm_sched_entity *entity;
	/**
	 * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed.
	 * Protected by @vm's resv. Unused if @vm == NULL.
	 */
	u64 tlb_flush_seqno;
	/** @hw_engine_group_link: link into exec queues in the same hw engine group */
	struct list_head hw_engine_group_link;
	/** @lrc: logical ring context for this exec queue */
	struct xe_lrc *lrc[] __counted_by(width);
};
182 
/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
	/** @init: Initialize exec queue for submission backend */
	int (*init)(struct xe_exec_queue *q);
	/** @kill: Kill inflight submissions for backend */
	void (*kill)(struct xe_exec_queue *q);
	/** @fini: Undoes the init() for submission backend */
	void (*fini)(struct xe_exec_queue *q);
	/**
	 * @destroy: Destroy exec queue for submission backend. The backend
	 * function must call xe_exec_queue_fini() (which will in turn call the
	 * fini() backend function) to ensure the queue is properly cleaned up.
	 */
	void (*destroy)(struct xe_exec_queue *q);
	/** @set_priority: Set priority for exec queue */
	int (*set_priority)(struct xe_exec_queue *q,
			    enum xe_exec_queue_priority priority);
	/** @set_timeslice: Set timeslice for exec queue */
	int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
	/** @set_preempt_timeout: Set preemption timeout for exec queue */
	int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
	/**
	 * @suspend: Suspend exec queue from executing, allowed to be called
	 * multiple times in a row before resume with the caveat that
	 * suspend_wait returns before calling suspend again.
	 */
	int (*suspend)(struct xe_exec_queue *q);
	/**
	 * @suspend_wait: Wait for an exec queue to suspend executing, should be
	 * called after suspend. In the dma-fencing path this must return within
	 * a reasonable amount of time. An -ETIME return shall indicate an error
	 * waiting for suspend resulting in the associated VM getting killed.
	 */
	int (*suspend_wait)(struct xe_exec_queue *q);
	/**
	 * @resume: Resume exec queue execution, exec queue must be in a suspended
	 * state and dma fence returned from most recent suspend call must be
	 * signalled when this function is called.
	 */
	void (*resume)(struct xe_exec_queue *q);
	/** @reset_status: check exec queue reset status */
	bool (*reset_status)(struct xe_exec_queue *q);
};
228 
229 #endif
230