xref: /linux/drivers/gpu/drm/lima/lima_sched.c (revision e721d1cc8101a26fba22dc9a62309522e15bd0c7)
1a1d2a633SQiang Yu // SPDX-License-Identifier: GPL-2.0 OR MIT
2a1d2a633SQiang Yu /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
3a1d2a633SQiang Yu 
453cb55b2SErico Nunes #include <linux/hardirq.h>
57938f421SLucas De Marchi #include <linux/iosys-map.h>
6a1d2a633SQiang Yu #include <linux/kthread.h>
7a1d2a633SQiang Yu #include <linux/slab.h>
8b78edd46SQiang Yu #include <linux/vmalloc.h>
950de2e9eSQiang Yu #include <linux/pm_runtime.h>
10a1d2a633SQiang Yu 
1119969707SMartin Blumenstingl #include "lima_devfreq.h"
12a1d2a633SQiang Yu #include "lima_drv.h"
13a1d2a633SQiang Yu #include "lima_sched.h"
14a1d2a633SQiang Yu #include "lima_vm.h"
15a1d2a633SQiang Yu #include "lima_mmu.h"
16a1d2a633SQiang Yu #include "lima_l2_cache.h"
17d61dd248SQiang Yu #include "lima_gem.h"
187f60c4b9SQiang Yu #include "lima_trace.h"
19a1d2a633SQiang Yu 
/* Hardware-done fence for a lima job; allocated from lima_fence_slab. */
struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;	/* pipe this fence was emitted on */
};

/* Fence slab shared by all pipes; refcounted via lima_sched_slab_init/fini. */
static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;	/* number of active users of the slab */
27a1d2a633SQiang Yu 
28a1d2a633SQiang Yu int lima_sched_slab_init(void)
29a1d2a633SQiang Yu {
30a1d2a633SQiang Yu 	if (!lima_fence_slab) {
31a1d2a633SQiang Yu 		lima_fence_slab = kmem_cache_create(
32a1d2a633SQiang Yu 			"lima_fence", sizeof(struct lima_fence), 0,
33a1d2a633SQiang Yu 			SLAB_HWCACHE_ALIGN, NULL);
34a1d2a633SQiang Yu 		if (!lima_fence_slab)
35a1d2a633SQiang Yu 			return -ENOMEM;
36a1d2a633SQiang Yu 	}
37a1d2a633SQiang Yu 
38a1d2a633SQiang Yu 	lima_fence_slab_refcnt++;
39a1d2a633SQiang Yu 	return 0;
40a1d2a633SQiang Yu }
41a1d2a633SQiang Yu 
42a1d2a633SQiang Yu void lima_sched_slab_fini(void)
43a1d2a633SQiang Yu {
44a1d2a633SQiang Yu 	if (!--lima_fence_slab_refcnt) {
45a1d2a633SQiang Yu 		kmem_cache_destroy(lima_fence_slab);
46a1d2a633SQiang Yu 		lima_fence_slab = NULL;
47a1d2a633SQiang Yu 	}
48a1d2a633SQiang Yu }
49a1d2a633SQiang Yu 
/* Downcast a dma_fence embedded in a lima_fence back to its container. */
static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}
54a1d2a633SQiang Yu 
/* dma_fence_ops callback: fixed driver name shown in fence debug output. */
static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
	return "lima";
}
59a1d2a633SQiang Yu 
60a1d2a633SQiang Yu static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
61a1d2a633SQiang Yu {
62a1d2a633SQiang Yu 	struct lima_fence *f = to_lima_fence(fence);
63a1d2a633SQiang Yu 
64a1d2a633SQiang Yu 	return f->pipe->base.name;
65a1d2a633SQiang Yu }
66a1d2a633SQiang Yu 
67a1d2a633SQiang Yu static void lima_fence_release_rcu(struct rcu_head *rcu)
68a1d2a633SQiang Yu {
69a1d2a633SQiang Yu 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
70a1d2a633SQiang Yu 	struct lima_fence *fence = to_lima_fence(f);
71a1d2a633SQiang Yu 
72a1d2a633SQiang Yu 	kmem_cache_free(lima_fence_slab, fence);
73a1d2a633SQiang Yu }
74a1d2a633SQiang Yu 
75a1d2a633SQiang Yu static void lima_fence_release(struct dma_fence *fence)
76a1d2a633SQiang Yu {
77a1d2a633SQiang Yu 	struct lima_fence *f = to_lima_fence(fence);
78a1d2a633SQiang Yu 
79a1d2a633SQiang Yu 	call_rcu(&f->base.rcu, lima_fence_release_rcu);
80a1d2a633SQiang Yu }
81a1d2a633SQiang Yu 
/* Fence operations; .release defers freeing to an RCU grace period. */
static const struct dma_fence_ops lima_fence_ops = {
	.get_driver_name = lima_fence_get_driver_name,
	.get_timeline_name = lima_fence_get_timeline_name,
	.release = lima_fence_release,
};
87a1d2a633SQiang Yu 
88a1d2a633SQiang Yu static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
89a1d2a633SQiang Yu {
90a1d2a633SQiang Yu 	struct lima_fence *fence;
91a1d2a633SQiang Yu 
92a1d2a633SQiang Yu 	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
93a1d2a633SQiang Yu 	if (!fence)
94a1d2a633SQiang Yu 		return NULL;
95a1d2a633SQiang Yu 
96a1d2a633SQiang Yu 	fence->pipe = pipe;
97a1d2a633SQiang Yu 	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
98a1d2a633SQiang Yu 		       pipe->fence_context, ++pipe->fence_seqno);
99a1d2a633SQiang Yu 
100a1d2a633SQiang Yu 	return fence;
101a1d2a633SQiang Yu }
102a1d2a633SQiang Yu 
/* Downcast a drm_sched_job embedded in a lima_sched_task. */
static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
	return container_of(job, struct lima_sched_task, base);
}
107a1d2a633SQiang Yu 
/* Downcast a drm_gpu_scheduler embedded in a lima_sched_pipe. */
static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct lima_sched_pipe, base);
}
112a1d2a633SQiang Yu 
113a1d2a633SQiang Yu int lima_sched_task_init(struct lima_sched_task *task,
114a1d2a633SQiang Yu 			 struct lima_sched_context *context,
115a1d2a633SQiang Yu 			 struct lima_bo **bos, int num_bos,
116a1d2a633SQiang Yu 			 struct lima_vm *vm)
117a1d2a633SQiang Yu {
118a1d2a633SQiang Yu 	int err, i;
119a1d2a633SQiang Yu 
120a1d2a633SQiang Yu 	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
121a1d2a633SQiang Yu 	if (!task->bos)
122a1d2a633SQiang Yu 		return -ENOMEM;
123a1d2a633SQiang Yu 
124a1d2a633SQiang Yu 	for (i = 0; i < num_bos; i++)
125d61dd248SQiang Yu 		drm_gem_object_get(&bos[i]->base.base);
126a1d2a633SQiang Yu 
127a78422e9SDanilo Krummrich 	err = drm_sched_job_init(&task->base, &context->base, 1, vm);
128a1d2a633SQiang Yu 	if (err) {
129a1d2a633SQiang Yu 		kfree(task->bos);
130a1d2a633SQiang Yu 		return err;
131a1d2a633SQiang Yu 	}
132a1d2a633SQiang Yu 
133dbe48d03SDaniel Vetter 	drm_sched_job_arm(&task->base);
134dbe48d03SDaniel Vetter 
135a1d2a633SQiang Yu 	task->num_bos = num_bos;
136a1d2a633SQiang Yu 	task->vm = lima_vm_get(vm);
137f3fb2007SEric Anholt 
138a1d2a633SQiang Yu 	return 0;
139a1d2a633SQiang Yu }
140a1d2a633SQiang Yu 
141a1d2a633SQiang Yu void lima_sched_task_fini(struct lima_sched_task *task)
142a1d2a633SQiang Yu {
143a1d2a633SQiang Yu 	int i;
144a1d2a633SQiang Yu 
145a1d2a633SQiang Yu 	drm_sched_job_cleanup(&task->base);
146a1d2a633SQiang Yu 
147a1d2a633SQiang Yu 	if (task->bos) {
148a1d2a633SQiang Yu 		for (i = 0; i < task->num_bos; i++)
1494ded8550SEmil Velikov 			drm_gem_object_put(&task->bos[i]->base.base);
150a1d2a633SQiang Yu 		kfree(task->bos);
151a1d2a633SQiang Yu 	}
152a1d2a633SQiang Yu 
153a1d2a633SQiang Yu 	lima_vm_put(task->vm);
154a1d2a633SQiang Yu }
155a1d2a633SQiang Yu 
156a1d2a633SQiang Yu int lima_sched_context_init(struct lima_sched_pipe *pipe,
157*e721d1ccSErico Nunes 			    struct lima_sched_context *context)
158a1d2a633SQiang Yu {
159b3ac1766SNirmoy Das 	struct drm_gpu_scheduler *sched = &pipe->base;
160a1d2a633SQiang Yu 
161b3ac1766SNirmoy Das 	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
162*e721d1ccSErico Nunes 				     &sched, 1, NULL);
163a1d2a633SQiang Yu }
164a1d2a633SQiang Yu 
/* Tear down a context's scheduler entity; @pipe is unused here. */
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_destroy(&context->base);
}
170a1d2a633SQiang Yu 
1710e10e9a1SDaniel Vetter struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
172a1d2a633SQiang Yu {
173a1d2a633SQiang Yu 	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
174a1d2a633SQiang Yu 
1757f60c4b9SQiang Yu 	trace_lima_task_submit(task);
1760e10e9a1SDaniel Vetter 	drm_sched_entity_push_job(&task->base);
177a1d2a633SQiang Yu 	return fence;
178a1d2a633SQiang Yu }
179a1d2a633SQiang Yu 
18050de2e9eSQiang Yu static int lima_pm_busy(struct lima_device *ldev)
18150de2e9eSQiang Yu {
18250de2e9eSQiang Yu 	int ret;
18350de2e9eSQiang Yu 
18450de2e9eSQiang Yu 	/* resume GPU if it has been suspended by runtime PM */
185de499781SQinglang Miao 	ret = pm_runtime_resume_and_get(ldev->dev);
18650de2e9eSQiang Yu 	if (ret < 0)
18750de2e9eSQiang Yu 		return ret;
18850de2e9eSQiang Yu 
18950de2e9eSQiang Yu 	lima_devfreq_record_busy(&ldev->devfreq);
19050de2e9eSQiang Yu 	return 0;
19150de2e9eSQiang Yu }
19250de2e9eSQiang Yu 
/* Mark the GPU idle: record it for devfreq, then release the runtime-PM
 * reference taken in lima_pm_busy() so the GPU can auto-suspend.
 */
static void lima_pm_idle(struct lima_device *ldev)
{
	lima_devfreq_record_idle(&ldev->devfreq);

	/* GPU can do auto runtime suspend */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}
20150de2e9eSQiang Yu 
/*
 * drm_sched backend: start @job on the hardware. Creates the hardware
 * fence, powers the GPU up, flushes L2 caches, switches the MMU(s) to
 * the task's VM and kicks the pipe. Returns the task's fence (with an
 * extra reference for the scheduler) or NULL on error / after reset.
 */
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	struct lima_fence *fence;
	int i, err;

	/* after GPU reset */
	if (job->s_fence->finished.error < 0)
		return NULL;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;

	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;

	/* for caller usage of the fence, otherwise irq handler
	 * may consume the fence before caller use it
	 */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* this is needed for MMU to work correctly, otherwise GP/PP
	 * will hang or page fault for unknown reason after running for
	 * a while.
	 *
	 * Need to investigate:
	 * 1. is it related to TLB
	 * 2. how much performance will be affected by L2 cache flush
	 * 3. can we reduce the calling of this function because all
	 *    GP/PP use the same L2 cache on mali400
	 *
	 * TODO:
	 * 1. move this to task fini to save some wait time?
	 * 2. when GP/PP use different l2 cache, need PP wait GP l2
	 *    cache flush?
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	/* swap the pipe's current VM reference for this task's VM */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	trace_lima_task_run(task);

	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}
268a1d2a633SQiang Yu 
/*
 * Capture the state of a timed-out task — frame data, process name,
 * pid and the contents of every BO — into dev->error_task_list for
 * later retrieval. Gives up silently (after a dev_info/dev_err) when
 * the list already holds lima_max_error_tasks entries, or when
 * allocating or mapping a buffer fails.
 */
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
	struct lima_sched_error_task *et;
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	struct lima_ctx *ctx =
		container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
	struct lima_dump_task *dt;
	struct lima_dump_chunk *chunk;
	struct lima_dump_chunk_pid *pid_chunk;
	struct lima_dump_chunk_buffer *buffer_chunk;
	u32 size, task_size, mem_size;
	int i;
	struct iosys_map map;
	int ret;

	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "fail to save task state from %s pid %d: "
			 "error task list is full\n", ctx->pname, ctx->pid);
		goto out;
	}

	/* first pass: compute the total dump size, one header per chunk */
	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	/* pid chunk */
	size += sizeof(struct lima_dump_chunk);
	/* buffer chunks */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}

	task_size = size + sizeof(struct lima_dump_task);
	mem_size = task_size + sizeof(*et);
	et = kvmalloc(mem_size, GFP_KERNEL);
	if (!et) {
		dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	}

	/* dump payload lives directly behind the list-node header */
	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	memset(dt, 0, sizeof(*dt));
	dt->id = pipe_id;
	dt->size = size;

	/* chunks are packed back to back right after the task header */
	chunk = (struct lima_dump_chunk *)(dt + 1);
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	memset(pid_chunk, 0, sizeof(*pid_chunk));
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		void *data;

		memset(buffer_chunk, 0, sizeof(*buffer_chunk));
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			/* heap BOs are mapped by hand from their page array */
			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			if (!data) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			vunmap(data);
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
			if (ret) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);

			drm_gem_vunmap_unlocked(&bo->base.base, &map);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "save error task state success\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
}
398b78edd46SQiang Yu 
/*
 * drm_sched backend: handle a job timeout. First rule out spurious
 * timeouts (fence already signaled, possibly only visible after
 * synchronizing with a delayed IRQ handler); otherwise stop the
 * scheduler, optionally record the error state, reset the pipe and
 * its MMUs, and restart scheduling.
 */
static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;
	struct lima_ip *ip = pipe->processor[0];
	int i;

	/*
	 * If the GPU managed to complete this jobs fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	/*
	 * Lima IRQ handler may take a long time to process an interrupt
	 * if there is another IRQ handler hogging the processing.
	 * In order to catch such cases and not report spurious Lima job
	 * timeouts, synchronize the IRQ handler and re-check the fence
	 * status.
	 */
	for (i = 0; i < pipe->num_processor; i++)
		synchronize_irq(pipe->processor[i]->irq);

	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	if (!pipe->error)
		DRM_ERROR("%s job timeout\n", lima_ip_name(ip));

	drm_sched_stop(&pipe->base, &task->base);

	drm_sched_increase_karma(&task->base);

	/* optionally save the task state so it can be dumped later */
	if (lima_max_error_tasks)
		lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	/* the hardware no longer uses the task's VM */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
461a1d2a633SQiang Yu 
462a1d2a633SQiang Yu static void lima_sched_free_job(struct drm_sched_job *job)
463a1d2a633SQiang Yu {
464a1d2a633SQiang Yu 	struct lima_sched_task *task = to_lima_task(job);
465a1d2a633SQiang Yu 	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
466a1d2a633SQiang Yu 	struct lima_vm *vm = task->vm;
467a1d2a633SQiang Yu 	struct lima_bo **bos = task->bos;
468a1d2a633SQiang Yu 	int i;
469a1d2a633SQiang Yu 
470a1d2a633SQiang Yu 	dma_fence_put(task->fence);
471a1d2a633SQiang Yu 
472a1d2a633SQiang Yu 	for (i = 0; i < task->num_bos; i++)
473a1d2a633SQiang Yu 		lima_vm_bo_del(vm, bos[i]);
474a1d2a633SQiang Yu 
475a1d2a633SQiang Yu 	lima_sched_task_fini(task);
476a1d2a633SQiang Yu 	kmem_cache_free(pipe->task_slab, task);
477a1d2a633SQiang Yu }
478a1d2a633SQiang Yu 
/* drm_gpu_scheduler backend callbacks for lima pipes. */
static const struct drm_sched_backend_ops lima_sched_ops = {
	.run_job = lima_sched_run_job,
	.timedout_job = lima_sched_timedout_job,
	.free_job = lima_sched_free_job,
};
484a1d2a633SQiang Yu 
4852081e8dcSQiang Yu static void lima_sched_recover_work(struct work_struct *work)
4862081e8dcSQiang Yu {
4872081e8dcSQiang Yu 	struct lima_sched_pipe *pipe =
4882081e8dcSQiang Yu 		container_of(work, struct lima_sched_pipe, recover_work);
4892081e8dcSQiang Yu 	int i;
4902081e8dcSQiang Yu 
4912081e8dcSQiang Yu 	for (i = 0; i < pipe->num_l2_cache; i++)
4922081e8dcSQiang Yu 		lima_l2_cache_flush(pipe->l2_cache[i]);
4932081e8dcSQiang Yu 
4942081e8dcSQiang Yu 	if (pipe->bcast_mmu) {
4952081e8dcSQiang Yu 		lima_mmu_flush_tlb(pipe->bcast_mmu);
4962081e8dcSQiang Yu 	} else {
4972081e8dcSQiang Yu 		for (i = 0; i < pipe->num_mmu; i++)
4982081e8dcSQiang Yu 			lima_mmu_flush_tlb(pipe->mmu[i]);
4992081e8dcSQiang Yu 	}
5002081e8dcSQiang Yu 
5012081e8dcSQiang Yu 	if (pipe->task_recover(pipe))
5022081e8dcSQiang Yu 		drm_sched_fault(&pipe->base);
5032081e8dcSQiang Yu }
5042081e8dcSQiang Yu 
/*
 * Initialize a pipe's fence context, recovery worker and DRM GPU
 * scheduler. The job timeout defaults to 500 ms unless overridden by
 * the lima_sched_timeout_ms module parameter. Returns the
 * drm_sched_init() result.
 */
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 500;

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	/* one job in flight at a time (hw_submission = 1) */
	return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
			      DRM_SCHED_PRIORITY_COUNT,
			      1,
			      lima_job_hang_limit,
			      msecs_to_jiffies(timeout), NULL,
			      NULL, name, pipe->ldev->dev);
}
522a1d2a633SQiang Yu 
/* Tear down the pipe's DRM GPU scheduler. */
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
	drm_sched_fini(&pipe->base);
}
527a1d2a633SQiang Yu 
/*
 * Completion handler for the current task (invoked from the pipe's
 * interrupt path — see the synchronize_irq() use in the timeout
 * handler). On error, either schedule the recovery worker for
 * recoverable tasks or raise a scheduler fault; on success, finish
 * the task, signal its fence and let the GPU go idle.
 */
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}
}
545