/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <asm/cacheflush.h>
#include <drm/drm_util.h>
#include <drm/drm_cache.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_timeline.h"
#include "intel_workarounds.h"

struct drm_printer;
struct intel_context;
struct intel_gt;
struct lock_class_key;

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

#define ENGINE_TRACE(e, fmt, ...) do {					\
	const struct intel_engine_cs *e__ __maybe_unused = (e);	\
	GEM_TRACE("%s %s: " fmt,					\
		  dev_name(e__->i915->drm.dev), e__->name,		\
		  ##__VA_ARGS__);					\
} while (0)
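
/*
 * Illustrative example (not part of the original header): ENGINE_TRACE
 * takes printf-style arguments and tags them with the device and engine
 * name, e.g. a hypothetical caller might emit:
 *
 *	ENGINE_TRACE(engine, "resumed, head %08x\n", head);
 */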

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g.:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * Register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
	intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...)	__ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...)	__ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...)	__ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
	__ENGINE_REG_OP(read64_2x32, (engine__), \
			lower_reg__((engine__)->mmio_base), \
			upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
	__ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
	__ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...)	__ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...)	__ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...)	__ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
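
/*
 * Illustrative sketch (not part of the original header): assuming
 * base-relative defines such as RING_HEAD/RING_TAIL from i915_reg.h, a
 * caller could sample and bump the ring registers as:
 *
 *	u32 head = ENGINE_READ(engine, RING_HEAD);
 *	ENGINE_WRITE(engine, RING_TAIL, tail);
 *
 * The _FW variants bypass forcewake bookkeeping, so the caller must
 * already hold the required forcewake domain.
 */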

#define GEN6_RING_FAULT_REG_READ(engine__) \
	intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
	intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
	u32 __val; \
\
	__val = intel_uncore_read((engine__)->uncore, \
				  RING_FAULT_REG(engine__)); \
	__val &= ~(clear__); \
	__val |= (set__); \
	intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
			   __val); \
})
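
/*
 * Illustrative sketch (not part of the original header): assuming
 * RING_FAULT_VALID from i915_reg.h, a fault handler might acknowledge a
 * pending fault by clearing the valid bit and flushing the write:
 *
 *	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
 *	GEN6_RING_FAULT_REG_POSTING_READ(engine);
 */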

/* A seqno is actually only a u32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must use qword-aligned offsets, simply pretend
 * it's 8 bytes wide.
 */

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}
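
/*
 * Illustrative note (not part of the original header): port_mask is one
 * less than the number of submission ports, e.g. a dual-port ELSP has
 * port_mask == 1 and execlists_num_ports() returns 2.
 */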

static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
	struct i915_request * const *cur, * const *old, *active;

	cur = READ_ONCE(execlists->active);
	smp_rmb(); /* pairs with overwrite protection in process_csb() */
	do {
		old = cur;

		active = READ_ONCE(*cur);
		cur = READ_ONCE(execlists->active);

		smp_rmb(); /* and complete the seqlock retry */
	} while (unlikely(cur != old));

	return active;
}

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
	WRITE_ONCE(engine->status_page.addr[reg], value);
	drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
}
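
/*
 * Illustrative sketch (not part of the original header): the breadcrumb
 * written at the end of each request lands in the status page, so a
 * hypothetical poll of the last completed seqno reads:
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 */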

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE		(0x42 * sizeof(u32))
#define I915_GEM_HWS_PXP		0x60
#define I915_GEM_HWS_PXP_ADDR		(I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x80

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define ICL_HWS_CSB_WRITE_INDEX		0x2f
#define INTEL_HWS_CSB_WRITE_INDEX(__i915) \
	(GRAPHICS_VER(__i915) >= 11 ? ICL_HWS_CSB_WRITE_INDEX : I915_HWS_CSB_WRITE_INDEX)
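
/*
 * Illustrative note (not part of the original header): the *_ADDR macros
 * turn a dword index into a byte offset within the status page, e.g.
 * I915_GEM_HWS_SEQNO_ADDR == 0x40 * sizeof(u32) == 0x100, which satisfies
 * the qword alignment that MI_FLUSH_DW requires, as noted above.
 */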

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);

void intel_engine_free_request_pool(struct intel_engine_cs *engine);

void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_engine_resume(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

bool intel_engine_irq_enable(struct intel_engine_cs *engine);
void intel_engine_irq_disable(struct intel_engine_cs *engine);

static inline void __intel_engine_reset(struct intel_engine_cs *engine,
					bool stalled)
{
	if (engine->reset.rewind)
		engine->reset.rewind(engine, stalled);
	engine->serial++; /* contexts lost */
}

bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);

void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
	__intel_engine_flush_submission(engine, true);
}

void intel_engines_reset_default_submission(struct intel_gt *gt);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);
void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
				   ktime_t *now);

struct i915_request *
intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name);

void intel_engine_destroy_pinned_context(struct intel_context *ce);

void xehp_enable_ccs_engines(struct intel_engine_cs *engine);

#define ENGINE_PHYSICAL	0
#define ENGINE_MOCK	1
#define ENGINE_VIRTUAL	2

static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
{
	return engine->gt->submission_method >= INTEL_SUBMISSION_GUC;
}
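
/*
 * Illustrative note (not part of the original header): assuming
 * INTEL_SUBMISSION_GUC is the highest value in the submission_method
 * enum (see intel_gt_types.h), the >= comparison is only true when the
 * GuC firmware owns scheduling for this GT.
 */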

static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
		return false;

	return intel_engine_has_preemption(engine);
}

#define FORCE_VIRTUAL	BIT(0)
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
			    unsigned int count, unsigned long flags);

static inline struct intel_context *
intel_engine_create_parallel(struct intel_engine_cs **engines,
			     unsigned int num_engines,
			     unsigned int width)
{
	GEM_BUG_ON(!engines[0]->cops->create_parallel);
	return engines[0]->cops->create_parallel(engines, num_engines, width);
}
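
/*
 * Illustrative note (not part of the original header): the engines in
 * the array are expected to share context ops; the first engine's
 * create_parallel() hook (implemented only by the GuC back-end at this
 * revision) builds the width-wide parallel context.
 */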

static inline bool
intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
	/*
	 * For non-GuC submission we expect the back-end to look at the
	 * heartbeat status of the actual physical engine that the work
	 * has been (or is being) scheduled on, so we should only reach
	 * here with GuC submission enabled.
	 */
	GEM_BUG_ON(!intel_engine_uses_guc(engine));

	return intel_guc_virtual_engine_has_heartbeat(engine);
}

static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
	if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
		return false;

	if (intel_engine_is_virtual(engine))
		return intel_virtual_engine_has_heartbeat(engine);
	else
		return READ_ONCE(engine->props.heartbeat_interval_ms);
}

static inline struct intel_engine_cs *
intel_engine_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
{
	GEM_BUG_ON(!intel_engine_is_virtual(engine));
	return engine->cops->get_sibling(engine, sibling);
}

static inline void
intel_engine_set_hung_context(struct intel_engine_cs *engine,
			      struct intel_context *ce)
{
	engine->hung_ce = ce;
}

static inline void
intel_engine_clear_hung_context(struct intel_engine_cs *engine)
{
	intel_engine_set_hung_context(engine, NULL);
}

static inline struct intel_context *
intel_engine_get_hung_context(struct intel_engine_cs *engine)
{
	return engine->hung_ce;
}

#endif /* _INTEL_RINGBUFFER_H_ */