xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_sched.c (revision 683da226f88dde7bf68940c21418995b63baae2f)
/*
 * Copyright (C) 2017 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/gpu_scheduler.h>
#include <linux/kthread.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"

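/*
 * Module parameters tuning the per-GPU DRM scheduler instance created
 * below: hw_job_limit caps how many jobs may be queued to the hardware at
 * once, job_hang_limit is handed to drm_sched_init() as its hang limit.
 * Both are read-only at runtime (0444), so they have to be set at module
 * load time, e.g. (illustrative invocation, not part of this file):
 *
 *   modprobe etnaviv hw_job_limit=4 job_hang_limit=1
 */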
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 2;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

static inline
struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
}

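/*
 * Scheduler dependency callback: return the next fence this job still has
 * to wait for, or NULL once all dependencies are resolved. The submit's
 * optional in-fence is checked first, then the exclusive and shared
 * reservation fences collected for each BO. Already signalled fences are
 * dropped here; unsignalled ones are handed to the scheduler core, which
 * is expected to wait on them and release the reference.
 */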
struct dma_fence *etnaviv_sched_dependency(struct drm_sched_job *sched_job,
					   struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}

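/*
 * Scheduler run_job callback: hand the submit to the hardware. The submit
 * is put on the GPU's active list under the GPU lock and then queued via
 * etnaviv_gpu_submit(), which returns the hardware fence the scheduler
 * waits on. If no fence comes back the submission failed, so the job's
 * reference is dropped and NULL is returned.
 */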
struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;

	mutex_lock(&submit->gpu->lock);
	list_add_tail(&submit->node, &submit->gpu->active_submit_list);
	mutex_unlock(&submit->gpu->lock);

	fence = etnaviv_gpu_submit(submit);
	if (!fence) {
		etnaviv_submit_put(submit);
		return NULL;
	}

	return fence;
}

static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	/*
	 * The scheduler's job timeout replaces the old etnaviv hangcheck
	 * timer; actual hang recovery is not wired up here yet.
	 */
}

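/*
 * Scheduler free_job callback: the job has retired, so take it off the
 * GPU's active list and drop the reference the scheduler was holding.
 */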
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	mutex_lock(&submit->gpu->lock);
	list_del(&submit->node);
	mutex_unlock(&submit->gpu->lock);

	etnaviv_submit_put(submit);
}

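/* Backend operations hooked into the DRM GPU scheduler. */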
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

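/*
 * Queue a validated submit on the scheduler entity of the submitting
 * context. A reference to the scheduler's "finished" fence is kept as the
 * submit's out-fence and published through the GPU's fence IDR, so that
 * it can later be looked up by ID. An extra submit reference is taken for
 * the scheduler itself and released again in etnaviv_sched_free_job().
 */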
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret;

	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
				 sched_entity, submit->cmdbuf.ctx);
	if (ret)
		return ret;

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	mutex_lock(&submit->gpu->fence_idr_lock);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	mutex_unlock(&submit->gpu->fence_idr_lock);
	if (submit->out_fence_id < 0)
		return -ENOMEM;

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job, sched_entity);

	return 0;
}

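/*
 * Create the per-GPU scheduler instance. The hardware job limit and hang
 * limit come from the module parameters above; the job timeout is fixed
 * at 500 ms.
 */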
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), dev_name(gpu->dev));
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}