/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;

enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
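
/*
 * Illustrative sketch only, not part of the upstream header: validating a
 * caller-supplied priority against the table above. The helper name is
 * hypothetical; XE_EXEC_QUEUE_PRIORITY_UNSET is internal to the execlist
 * backend and should never be accepted from callers.
 */
static inline bool xe_exec_queue_example_priority_is_valid(int prio)
{
	return prio >= XE_EXEC_QUEUE_PRIORITY_LOW &&
	       prio < XE_EXEC_QUEUE_PRIORITY_COUNT;
}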

/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
	/** @xef: Back pointer to xe file if this is a user-created exec queue */
	struct xe_file *xef;

	/** @gt: GT structure this exec queue can submit to */
	struct xe_gt *gt;
	/**
	 * @hwe: A hardware engine of the same class. May (physical engine) or
	 * may not (virtual engine) be the engine jobs actually end up running
	 * on. Should never really be used for submissions.
	 */
	struct xe_hw_engine *hwe;
	/** @refcount: ref count of this exec queue */
	struct kref refcount;
	/** @vm: VM (address space) for this exec queue */
	struct xe_vm *vm;
	/** @class: class of this exec queue */
	enum xe_engine_class class;
	/**
	 * @logical_mask: logical mask of where a job submitted to this exec
	 * queue can run
	 */
	u32 logical_mask;
	/** @name: name of this exec queue */
	char name[MAX_FENCE_NAME_LEN];
	/** @width: width (number of BBs submitted per exec) of this exec queue */
	u16 width;
	/** @msix_vec: MSI-X vector (for platforms that support it) */
	u16 msix_vec;
	/** @fence_irq: fence IRQ used to signal job completion */
	struct xe_hw_fence_irq *fence_irq;

	/**
	 * @last_fence: last fence on the exec queue; protected by vm->lock in
	 * write mode for a bind exec queue, by the dma-resv lock for a
	 * non-bind exec queue
	 */
	struct dma_fence *last_fence;

/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL			BIT(0)
/* kernel exec queue, only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT		BIT(1)
/* for VM jobs. Caller needs to hold an rpm ref when creating a queue with this flag */
#define EXEC_QUEUE_FLAG_VM			BIT(2)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(4)

	/**
	 * @flags: flags for this exec queue; should be set up statically,
	 * aside from the ban bit
	 */
	unsigned long flags;

	union {
		/** @multi_gt_list: list head for VM bind engines if multi-GT */
		struct list_head multi_gt_list;
		/** @multi_gt_link: link for VM bind engines if multi-GT */
		struct list_head multi_gt_link;
	};

	union {
		/** @execlist: execlist backend specific state for exec queue */
		struct xe_execlist_exec_queue *execlist;
		/** @guc: GuC backend specific state for exec queue */
		struct xe_guc_exec_queue *guc;
	};

	/** @sched_props: scheduling properties */
	struct {
		/** @sched_props.timeslice_us: timeslice period in microseconds */
		u32 timeslice_us;
		/** @sched_props.preempt_timeout_us: preemption timeout in microseconds */
		u32 preempt_timeout_us;
		/** @sched_props.job_timeout_ms: job timeout in milliseconds */
		u32 job_timeout_ms;
		/** @sched_props.priority: priority of this exec queue */
		enum xe_exec_queue_priority priority;
	} sched_props;

	/** @lr: long-running exec queue state */
	struct {
		/** @lr.pfence: preemption fence */
		struct dma_fence *pfence;
		/** @lr.context: preemption fence context */
		u64 context;
		/** @lr.seqno: preemption fence seqno */
		u32 seqno;
		/** @lr.link: link into VM's list of exec queues */
		struct list_head link;
	} lr;

	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;

	/** @ring_ops: ring operations for this exec queue */
	const struct xe_ring_ops *ring_ops;
	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
	struct drm_sched_entity *entity;
	/**
	 * @tlb_flush_seqno: The seqno of the last rebind TLB flush performed.
	 * Protected by @vm's resv. Unused if @vm == NULL.
	 */
	u64 tlb_flush_seqno;
	/** @hw_engine_group_link: link into exec queues in the same hw engine group */
	struct list_head hw_engine_group_link;
	/** @lrc: logical ring context for this exec queue */
	struct xe_lrc *lrc[] __counted_by(width);
};
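
/*
 * Illustrative sketch only, not part of the upstream header: exec queues are
 * reference counted via @refcount, and @flags is set up statically at
 * creation. Both helper names below are hypothetical; the driver's real
 * get/put helpers live in xe_exec_queue.h.
 */
static inline struct xe_exec_queue *
xe_exec_queue_example_get(struct xe_exec_queue *q)
{
	kref_get(&q->refcount);	/* take an extra reference */
	return q;
}

static inline bool xe_exec_queue_example_is_kernel_only(struct xe_exec_queue *q)
{
	/* EXEC_QUEUE_FLAG_KERNEL is set once at creation and never cleared */
	return q->flags & EXEC_QUEUE_FLAG_KERNEL;
}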

/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
	/** @init: Initialize exec queue for submission backend */
	int (*init)(struct xe_exec_queue *q);
	/** @kill: Kill inflight submissions for backend */
	void (*kill)(struct xe_exec_queue *q);
	/** @fini: Fini exec queue for submission backend */
	void (*fini)(struct xe_exec_queue *q);
	/** @set_priority: Set priority for exec queue */
	int (*set_priority)(struct xe_exec_queue *q,
			    enum xe_exec_queue_priority priority);
	/** @set_timeslice: Set timeslice for exec queue */
	int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
	/** @set_preempt_timeout: Set preemption timeout for exec queue */
	int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
	/**
	 * @suspend: Suspend exec queue from executing. May be called multiple
	 * times in a row before resume, with the caveat that suspend_wait must
	 * return before suspend is called again.
	 */
	int (*suspend)(struct xe_exec_queue *q);
	/**
	 * @suspend_wait: Wait for an exec queue to suspend executing; should be
	 * called after suspend. This is in the dma-fencing path and thus must
	 * return within a reasonable amount of time. A -ETIME return indicates
	 * an error waiting for suspend, resulting in the associated VM getting
	 * killed.
	 */
	int (*suspend_wait)(struct xe_exec_queue *q);
	/**
	 * @resume: Resume exec queue execution. The exec queue must be in a
	 * suspended state, and the dma fence returned from the most recent
	 * suspend call must be signalled when this function is called.
	 */
	void (*resume)(struct xe_exec_queue *q);
	/** @reset_status: check exec queue reset status */
	bool (*reset_status)(struct xe_exec_queue *q);
};
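
/*
 * Illustrative sketch only, assuming a valid exec queue whose backend
 * implements all three hooks: how the suspend/suspend_wait/resume contract
 * documented above is typically sequenced. The function name is hypothetical.
 */
static inline int xe_exec_queue_example_cycle(struct xe_exec_queue *q)
{
	int err;

	err = q->ops->suspend(q);		/* request suspension */
	if (err)
		return err;

	/* Must complete before suspend() may be called again */
	err = q->ops->suspend_wait(q);
	if (err)
		return err;			/* -ETIME: the VM gets killed */

	/* Only legal once the fence from the last suspend has signalled */
	q->ops->resume(q);
	return 0;
}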

#endif