xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_sched.c (revision ea49432d184a6a09f84461604b7711a4e9f5ec9c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

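/*
 * Module parameters, exposed read-only in sysfs:
 * job_hang_limit - number of timeouts a job may survive before it is
 *                  marked guilty and dropped instead of resubmitted
 *                  (default 0: the first real hang drops the job).
 * hw_job_limit   - maximum number of jobs queued to the hardware at
 *                  the same time (default 4).
 */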
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

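/*
 * Called by the scheduler to collect the fences this job still depends
 * on. Returns the next unsignaled dependency on each call and NULL once
 * the explicit in-fence and all implicit BO reservation fences have
 * signaled, at which point the job is ready to run.
 */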
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	/* Explicit sync: hand back the user-supplied in-fence first. */
	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

	/* Implicit sync: walk the reservation fences of all referenced BOs. */
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		/* Exclusive (write) fence of this BO. */
		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		/* Shared (read) fences of this BO. */
		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}

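/*
 * Called by the scheduler once all dependencies have signaled. Hands the
 * job to the hardware and returns the fence that signals on completion,
 * or skips the job if it was already marked bad (its finished fence
 * carries an error) by an earlier recovery.
 */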
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

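/*
 * Called by the scheduler when a job exceeds its timeout. Spurious
 * timeouts and jobs whose front-end still makes forward progress just
 * restart the scheduler; a real hang dumps the core for debugging,
 * resets the GPU and resubmits the remaining jobs.
 */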
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->completed_fence != gpu->hangcheck_fence ||
	    change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

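/*
 * Called by the scheduler once it is done with a job; drops the
 * reference the scheduler took on the submit in etnaviv_sched_push_job().
 */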
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

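/*
 * Hand a prepared submit over to the scheduler. Takes an extra reference
 * on the submit for the scheduler and allocates the fence id that
 * userspace uses to look up the out-fence.
 */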
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->ctx);
	if (ret)
		goto out_unlock;

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}

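/* One scheduler instance per GPU core, with a 500 ms job timeout. */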
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	return drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			      etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			      msecs_to_jiffies(500), NULL, NULL,
			      dev_name(gpu->dev));
}

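/* Tear down the scheduler when the GPU core goes away. */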
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}