// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_execlist.h"

#include <drm/drm_managed.h>

#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"

#define XE_EXECLIST_HANG_LIMIT 1

#define SW_CTX_ID_SHIFT 37
#define SW_CTX_ID_WIDTH 11
#define XEHP_SW_CTX_ID_SHIFT  39
#define XEHP_SW_CTX_ID_WIDTH  16

#define SW_CTX_ID \
	GENMASK_ULL(SW_CTX_ID_WIDTH + SW_CTX_ID_SHIFT - 1, \
		    SW_CTX_ID_SHIFT)

#define XEHP_SW_CTX_ID \
	GENMASK_ULL(XEHP_SW_CTX_ID_WIDTH + XEHP_SW_CTX_ID_SHIFT - 1, \
		    XEHP_SW_CTX_ID_SHIFT)

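/*
 * Submit a single context to the hardware: fold the software context ID into
 * the LRC descriptor, write the ring tail into the context image, then point
 * the engine at its HWSP, disable legacy ring mode and load the descriptor
 * through the execlist submit queue (SQ_CONTENTS + EL_CTRL_LOAD).
 */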
static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
			u32 ctx_id)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	u64 lrc_desc;

	lrc_desc = xe_lrc_descriptor(lrc);

	if (GRAPHICS_VERx100(xe) >= 1250) {
		xe_gt_assert(hwe->gt, FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
		lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
	} else {
		xe_gt_assert(hwe->gt, FIELD_FIT(SW_CTX_ID, ctx_id));
		lrc_desc |= FIELD_PREP(SW_CTX_ID, ctx_id);
	}

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
		xe_mmio_write32(hwe->gt, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
	lrc->ring.old_tail = lrc->ring.tail;

	/*
	 * Make sure the context image is complete before we submit it to HW.
	 *
	 * Ostensibly, writes (including the WCB) should be flushed prior to
	 * an uncached write such as our mmio register access. However, the
	 * empirical evidence (esp. on Braswell) suggests that the WC write
	 * into memory may not be visible to the HW prior to the completion
	 * of the UC register write, and that we may begin execution from the
	 * context before its image is complete, leading to invalid PD chasing.
	 */
	wmb();

	xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
			xe_bo_ggtt_addr(hwe->hwsp));
	xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
	xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
			_MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));

	xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
			lower_32_bits(lrc_desc));
	xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
			upper_32_bits(lrc_desc));
	xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
			EL_CTRL_LOAD);
}

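/*
 * Start (or restart) an exec queue on the port. A fresh software context ID
 * is picked whenever the queue differs from the one currently on the
 * hardware or has never run; ID 0 is reserved for the kernel context, and
 * the counter wraps back to 1 once it exceeds the ID field width.
 */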
static void __xe_execlist_port_start(struct xe_execlist_port *port,
				     struct xe_execlist_exec_queue *exl)
{
	struct xe_device *xe = gt_to_xe(port->hwe->gt);
	int max_ctx = FIELD_MAX(SW_CTX_ID);

	if (GRAPHICS_VERx100(xe) >= 1250)
		max_ctx = FIELD_MAX(XEHP_SW_CTX_ID);

	xe_execlist_port_assert_held(port);

	if (port->running_exl != exl || !exl->has_run) {
		port->last_ctx_id++;

		/* 0 is reserved for the kernel context */
		if (port->last_ctx_id > max_ctx)
			port->last_ctx_id = 1;
	}

	__start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
	port->running_exl = exl;
	exl->has_run = true;
}

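/*
 * Nothing left to run: hand the engine back to the per-engine kernel context
 * (padded with a couple of MI_NOOPs) so the port no longer tracks a user
 * queue as running.
 */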
static void __xe_execlist_port_idle(struct xe_execlist_port *port)
{
	u32 noop[2] = { MI_NOOP, MI_NOOP };

	xe_execlist_port_assert_held(port);

	if (!port->running_exl)
		return;

	xe_lrc_write_ring(&port->hwe->kernel_lrc, noop, sizeof(noop));
	__start_lrc(port->hwe, &port->hwe->kernel_lrc, 0);
	port->running_exl = NULL;
}

static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
{
	struct xe_lrc *lrc = exl->q->lrc;

	return lrc->ring.tail == lrc->ring.old_tail;
}

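/*
 * Pick the next queue to run: walk the active lists from highest to lowest
 * priority, dropping queues whose rings have no new work, and start the
 * first one that does. The chosen queue is requeued at the tail of its list
 * so queues of equal priority round-robin. If every list is empty, idle the
 * port on the kernel context.
 */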
static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
{
	struct xe_execlist_exec_queue *exl = NULL;
	int i;

	xe_execlist_port_assert_held(port);

	for (i = ARRAY_SIZE(port->active) - 1; i >= 0; i--) {
		while (!list_empty(&port->active[i])) {
			exl = list_first_entry(&port->active[i],
					       struct xe_execlist_exec_queue,
					       active_link);
			list_del(&exl->active_link);

			if (xe_execlist_is_idle(exl)) {
				exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
				continue;
			}

			list_add_tail(&exl->active_link, &port->active[i]);
			__xe_execlist_port_start(port, exl);
			return;
		}
	}

	__xe_execlist_port_idle(port);
}

static u64 read_execlist_status(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	u32 hi, lo;

	lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
	hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));

	return lo | (u64)hi << 32;
}

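/*
 * Called from the engine interrupt handler and from the fallback timer: bail
 * while bit 7 of the execlist status register is still set (presumably a
 * submission still in flight), otherwise kick the next active queue.
 */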
static void xe_execlist_port_irq_handler_locked(struct xe_execlist_port *port)
{
	u64 status;

	xe_execlist_port_assert_held(port);

	status = read_execlist_status(port->hwe);
	if (status & BIT(7))
		return;

	__xe_execlist_port_start_next_active(port);
}

static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
					 u16 intr_vec)
{
	struct xe_execlist_port *port = hwe->exl_port;

	spin_lock(&port->lock);
	xe_execlist_port_irq_handler_locked(port);
	spin_unlock(&port->lock);
}

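/*
 * A queue of the given priority just became runnable; only restart the
 * port's submission loop if it outranks whatever is currently running.
 */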
static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
					 enum xe_exec_queue_priority priority)
{
	xe_execlist_port_assert_held(port);

	if (port->running_exl && port->running_exl->active_priority >= priority)
		return;

	__xe_execlist_port_start_next_active(port);
}

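/*
 * The queue has new work in its ring: put it on the active list matching its
 * current scheduling priority (moving it if the priority changed) and poke
 * the port.
 */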
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{
	struct xe_execlist_port *port = exl->port;
	enum xe_exec_queue_priority priority = exl->q->sched_props.priority;

	XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
	XE_WARN_ON(priority < 0);
	XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active));

	spin_lock_irq(&port->lock);

	if (exl->active_priority != priority &&
	    exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET) {
		/* Priority changed, move it to the right list */
		list_del(&exl->active_link);
		exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
	}

	if (exl->active_priority == XE_EXEC_QUEUE_PRIORITY_UNSET) {
		exl->active_priority = priority;
		list_add_tail(&exl->active_link, &port->active[priority]);
	}

	xe_execlist_port_wake_locked(exl->port, priority);

	spin_unlock_irq(&port->lock);
}

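/*
 * Fallback for the racy interrupt handling noted below in
 * xe_execlist_port_create(): poll the port roughly once a second in case a
 * context-switch interrupt was missed, then re-arm the timer.
 */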
static void xe_execlist_port_irq_fail_timer(struct timer_list *timer)
{
	struct xe_execlist_port *port =
		container_of(timer, struct xe_execlist_port, irq_fail);

	spin_lock_irq(&port->lock);
	xe_execlist_port_irq_handler_locked(port);
	spin_unlock_irq(&port->lock);

	port->irq_fail.expires = jiffies + msecs_to_jiffies(1000);
	add_timer(&port->irq_fail);
}

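/*
 * One execlist port exists per hardware engine; it owns the per-engine
 * active lists, the software context ID counter and the fallback timer, and
 * installs itself as the engine's interrupt handler.
 */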
struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
						 struct xe_hw_engine *hwe)
{
	struct drm_device *drm = &xe->drm;
	struct xe_execlist_port *port;
	int i;

	port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->hwe = hwe;

	spin_lock_init(&port->lock);
	for (i = 0; i < ARRAY_SIZE(port->active); i++)
		INIT_LIST_HEAD(&port->active[i]);

	port->last_ctx_id = 1;
	port->running_exl = NULL;

	hwe->irq_handler = xe_execlist_port_irq_handler;

	/* TODO: Fix the interrupt code so it doesn't race like mad */
	timer_setup(&port->irq_fail, xe_execlist_port_irq_fail_timer, 0);
	port->irq_fail.expires = jiffies + msecs_to_jiffies(1000);
	add_timer(&port->irq_fail);

	return port;
}

void xe_execlist_port_destroy(struct xe_execlist_port *port)
{
	del_timer(&port->irq_fail);

	/* Prevent an interrupt while we're destroying */
	spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
	port->hwe->irq_handler = NULL;
	spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
}

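/*
 * drm_sched run_job callback: emit the job's commands into the queue's ring
 * and mark the queue active so the port will (re)submit it, returning an
 * extra reference to the job's fence for the scheduler.
 */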
static struct dma_fence *
execlist_run_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);
	struct xe_exec_queue *q = job->q;
	struct xe_execlist_exec_queue *exl = job->q->execlist;

	q->ring_ops->emit_job(job);
	xe_execlist_make_active(exl);

	return dma_fence_get(job->fence);
}

static void execlist_job_free(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);

	xe_sched_job_put(job);
}

static const struct drm_sched_backend_ops drm_sched_ops = {
	.run_job = execlist_run_job,
	.free_job = execlist_job_free,
};

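/*
 * Each execlist queue gets a private 1:1 drm_gpu_scheduler/entity pair with
 * the in-flight job limit derived from the ring size
 * (ring.size / MAX_JOB_SIZE_BYTES). This backend is only used when GuC
 * submission is disabled.
 */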
static int execlist_exec_queue_init(struct xe_exec_queue *q)
{
	struct drm_gpu_scheduler *sched;
	struct xe_execlist_exec_queue *exl;
	struct xe_device *xe = gt_to_xe(q->gt);
	int err;

	xe_assert(xe, !xe_device_uc_enabled(xe));

	drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n");

	exl = kzalloc(sizeof(*exl), GFP_KERNEL);
	if (!exl)
		return -ENOMEM;

	exl->q = q;

	err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
			     q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
			     XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
			     NULL, NULL, q->hwe->name,
			     gt_to_xe(q->gt)->drm.dev);
	if (err)
		goto err_free;

	sched = &exl->sched;
	err = drm_sched_entity_init(&exl->entity, 0, &sched, 1, NULL);
	if (err)
		goto err_sched;

	exl->port = q->hwe->exl_port;
	exl->has_run = false;
	exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
	q->execlist = exl;
	q->entity = &exl->entity;

	xe_exec_queue_assign_name(q, ffs(q->logical_mask) - 1);

	return 0;

err_sched:
	drm_sched_fini(&exl->sched);
err_free:
	kfree(exl);
	return err;
}

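/*
 * Deferred teardown, queued from execlist_exec_queue_fini(): pull the queue
 * off its port's active list if it is unexpectedly still on one (WARNing if
 * so), then tear down the scheduler entity, the scheduler and the queue
 * itself.
 */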
static void execlist_exec_queue_fini_async(struct work_struct *w)
{
	struct xe_execlist_exec_queue *ee =
		container_of(w, struct xe_execlist_exec_queue, fini_async);
	struct xe_exec_queue *q = ee->q;
	struct xe_execlist_exec_queue *exl = q->execlist;
	struct xe_device *xe = gt_to_xe(q->gt);
	unsigned long flags;

	xe_assert(xe, !xe_device_uc_enabled(xe));

	spin_lock_irqsave(&exl->port->lock, flags);
	if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET))
		list_del(&exl->active_link);
	spin_unlock_irqrestore(&exl->port->lock, flags);

	drm_sched_entity_fini(&exl->entity);
	drm_sched_fini(&exl->sched);
	kfree(exl);

	xe_exec_queue_fini(q);
}

static void execlist_exec_queue_kill(struct xe_exec_queue *q)
{
	/* NIY */
}

static void execlist_exec_queue_fini(struct xe_exec_queue *q)
{
	INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
	queue_work(system_unbound_wq, &q->execlist->fini_async);
}

static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
					    enum xe_exec_queue_priority priority)
{
	/* NIY */
	return 0;
}

static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
{
	/* NIY */
	return 0;
}

static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
						   u32 preempt_timeout_us)
{
	/* NIY */
	return 0;
}

static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
{
	/* NIY */
	return 0;
}

static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
	/* NIY */
}

static void execlist_exec_queue_resume(struct xe_exec_queue *q)
{
	/* NIY */
}

static bool execlist_exec_queue_reset_status(struct xe_exec_queue *q)
{
	/* NIY */
	return false;
}

static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
	.init = execlist_exec_queue_init,
	.kill = execlist_exec_queue_kill,
	.fini = execlist_exec_queue_fini,
	.set_priority = execlist_exec_queue_set_priority,
	.set_timeslice = execlist_exec_queue_set_timeslice,
	.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
	.suspend = execlist_exec_queue_suspend,
	.suspend_wait = execlist_exec_queue_suspend_wait,
	.resume = execlist_exec_queue_resume,
	.reset_status = execlist_exec_queue_reset_status,
};

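/*
 * Install the execlist backend's exec queue ops on this GT. This is a no-op
 * when GuC submission is enabled, in which case the GuC backend is used
 * instead.
 */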
int xe_execlist_init(struct xe_gt *gt)
{
	/* GuC submission enabled, nothing to do */
	if (xe_device_uc_enabled(gt_to_xe(gt)))
		return 0;

	gt->exec_queue_ops = &execlist_exec_queue_ops;

	return 0;
}