// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

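/*
 * Per-slot scheduler state: each of the NUM_JOB_SLOTS hardware job slots
 * gets its own drm_gpu_scheduler instance plus a dedicated dma_fence
 * context/seqno pair for the fences created in panfrost_fence_create().
 * job_lock doubles as the dma_fence lock for those fences.
 */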
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}

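/*
 * Hardware "done" fence, signalled from the job IRQ handler when the slot
 * raises its DONE interrupt. The dma_fence is the first member, so the
 * plain cast in to_panfrost_fence() below is valid.
 */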
struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}

static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};

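/*
 * Allocate and initialise a done-fence on the given slot's fence context.
 * The caller owns the returned reference. emit_seqno is not locked here;
 * in practice it is serialised by the slot's scheduler submitting one job
 * at a time from its own thread.
 */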
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler-only jobs and h/w with
	 * multiple (2) coherent core groups.
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}

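/*
 * Write one job chain to the hardware: program the slot's _NEXT registers
 * (head, affinity, config, flush ID) and then kick it with
 * JS_COMMAND_START. Assumes the slot's _NEXT registers are free, which the
 * WARN_ON below checks.
 */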
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	unsigned long flags;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0) {
		/*
		 * pm_runtime_get_sync() bumps the usage count even on
		 * failure, so drop it again before bailing out.
		 */
		pm_runtime_put_noidle(pfdev->dev);
		return;
	}

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
		goto end;

	panfrost_devfreq_record_transition(pfdev, js);
	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/*
	 * Start MMU, medium priority, cache clean/flush on end, clean/flush
	 * on start.
	 */
	/* TODO: different address spaces */
	cfg = JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
				job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);

	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);

end:
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
}

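/*
 * Implicit synchronisation helpers: panfrost_acquire_object_fences()
 * snapshots the exclusive (write) fence of every BO so the scheduler can
 * wait on them before running the job; panfrost_attach_object_fences()
 * then installs the job's render-done fence as the new exclusive fence on
 * every BO.
 */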
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}

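/**
 * panfrost_job_push() - Queue a job to the scheduler
 * @job: job to queue, with jc/bos/in_fences already filled in by the ioctl
 *
 * Locks the reservation objects of all BOs, initialises the drm_sched job,
 * collects the implicit fences to wait on, pushes the job to its slot's
 * entity and attaches the render-done fence to every BO for implicit sync
 * with later users.
 *
 * Return: 0 on success or a negative error code.
 */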
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}

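/* kref release helper: drop every fence and BO reference the job holds. */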
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put_unlocked(job->bos[i]);
		kvfree(job->bos);
	}

	kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}

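/*
 * drm_sched dependency callback: called repeatedly before the job runs and
 * hands back one fence per call (transferring the reference to the
 * scheduler) until none are left, at which point it returns NULL.
 */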
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}

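/*
 * drm_sched run_job callback: called once all dependencies have signalled.
 * Creates the hardware done-fence, submits the job chain to its slot and
 * returns the fence, which the job IRQ handler signals on completion.
 */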
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		irq_mask |= MK_JS_MASK(j);

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}

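/*
 * drm_sched timeout handling: stop the schedulers of all slots, mark the
 * guilty job, reset the GPU and then resubmit and restart. The reset is
 * device-global, so every slot has to be quiesced even though only one
 * timed out.
 */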
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	mutex_lock(&pfdev->reset_lock);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* panfrost_core_dump(pfdev); */

	panfrost_devfreq_record_transition(pfdev, js);
	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* restart the schedulers after the GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free,
};

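/*
 * One interrupt line covers all job slots; JOB_INT_STAT has a DONE and an
 * ERR bit per slot. Faults are routed into the scheduler via
 * drm_sched_fault(), completions just signal the job's done-fence.
 */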
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			panfrost_devfreq_record_transition(pfdev, j);
			dma_fence_signal(pfdev->jobs[j]->done_fence);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}

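/*
 * Called at probe time: request the "job" interrupt and create one
 * scheduler per slot, each allowing a single in-flight job and using a
 * 500ms hang timeout.
 */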
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, "job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}

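/*
 * Per-file-descriptor open: give each file its own scheduler entity per
 * slot, all on the NORMAL priority run queue.
 */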
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_sched_rq *rq;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

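/*
 * The device counts as idle only when no slot has a job in its hardware
 * queue and devfreq sees every slot as not busy.
 */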
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;

		/* Check whether the hardware is idle */
		if (pfdev->devfreq.slot[i].busy)
			return false;
	}

	return true;
}