xref: /linux/drivers/gpu/drm/msm/msm_gpu.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"


/*
 * Power Management:
 */

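/*
 * Bus scaling hooks: these are only available on kernels that provide the
 * MSM bus scaling framework (CONFIG_MSM_BUS_SCALING); otherwise the bs_*()
 * helpers below compile to empty stubs.
 */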
#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/kgsl.h>
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct drm_device *dev = gpu->dev;
	struct kgsl_device_platform_data *pdata;

	if (!pdev) {
		dev_err(dev->dev, "could not find kgsl pdata\n");
		return;
	}

	pdata = pdev->dev.platform_data;
	if (pdata->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

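/*
 * The "vdd" and "vddcx" regulators are optional: if they were not found
 * during msm_gpu_init() the pointers are left NULL and these helpers are
 * effectively no-ops.
 */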
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

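/*
 * Clock handling mirrors the downstream kgsl driver: every clock in
 * grp_clks[] except index 0 is prepared/enabled, and the core rate is set
 * on the lowest-indexed clock that was actually found (normally "core_clk").
 */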
static int enable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_prepare(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->fast_rate)
		clk_set_rate(rate_clk, gpu->fast_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_disable(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->slow_rate)
		clk_set_rate(rate_clk, gpu->slow_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}

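/*
 * Resume brings things up in dependency order: power rails first, then the
 * core clocks, then the AXI/bus path.  msm_gpu_pm_suspend() tears them down
 * in the reverse order.
 */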
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

/*
 * Hangcheck detection for locked gpu:
 */

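/*
 * recover_worker runs from the driver workqueue once hangcheck decides the
 * GPU is stuck: it calls the backend's ->recover() hook under struct_mutex
 * and then retires whatever work did manage to complete.
 */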
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	mutex_lock(&dev->struct_mutex);
	gpu->funcs->recover(gpu);
	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

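/*
 * The hangcheck timer fires periodically while work is outstanding.  If the
 * last completed fence has not advanced since the previous sample, but is
 * still behind the last submitted fence, the GPU is considered hung and
 * recover_work is queued.
 */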
static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	if (fence != gpu->hangcheck_fence) {
		/* some progress has been made.. ya! */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->submitted_fence) {
		/* no progress and not done.. hung! */
		gpu->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
				gpu->name);
		dev_err(dev->dev, "%s:     completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s:     submitted fence: %u\n",
				gpu->name, gpu->submitted_fence);
		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (gpu->submitted_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Cmdstream submission/retirement:
 */

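/*
 * retire_worker signals the new fence value and then, under struct_mutex,
 * walks active_list from the front, dropping the GPU's iova and reference
 * on every bo whose read and write fences have both completed.
 */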
static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->dev, fence);

	mutex_lock(&dev->struct_mutex);

	while (!list_empty(&gpu->active_list)) {
		struct msm_gem_object *obj;

		obj = list_first_entry(&gpu->active_list,
				struct msm_gem_object, mm_list);

		if ((obj->read_fence <= fence) &&
				(obj->write_fence <= fence)) {
			/* move to inactive: */
			msm_gem_move_to_inactive(&obj->base);
			msm_gem_put_iova(&obj->base, gpu->id);
			drm_gem_object_unreference(&obj->base);
		} else {
			break;
		}
	}

	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
}

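/*
 * Submission runs under struct_mutex, so fence numbers are handed out in
 * order.  Each bo that was not already active picks up a reference and a
 * GPU iova here, which are dropped again by retire_worker() once its fences
 * have passed.
 */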
/* add bo's to gpu's ring, and kick gpu: */
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i, ret;

	mutex_lock(&dev->struct_mutex);

	submit->fence = ++priv->next_fence;

	gpu->submitted_fence = submit->fence;

	ret = gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		if (!is_active(msm_obj)) {
			uint32_t iova;

			/* ring takes a reference to the bo and iova: */
			drm_gem_object_reference(&msm_obj->base);
			msm_gem_get_iova_locked(&msm_obj->base,
					submit->gpu->id, &iova);
		}

		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
	}
	hangcheck_timer_reset(gpu);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static const char *clk_names[] = {
		"src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
};

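/*
 * msm_gpu_init() maps registers, requests the IRQ, and looks up clocks and
 * regulators by name; any clock or regulator that is missing is simply left
 * NULL and skipped at power-up.  A GPU backend embeds struct msm_gpu and
 * calls this from its own constructor, roughly like so (illustrative only,
 * the names below are from a hypothetical a3xx backend):
 *
 *	ret = msm_gpu_init(drm, pdev, &a3xx_gpu->base, &a3xx_funcs, "a3xx",
 *			"kgsl_3d0_reg_memory", "kgsl_3d0_irq", RB_SIZE);
 */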
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	int i, ret;

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks: */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	gpu->iommu = iommu_domain_alloc(&platform_bus_type);
	if (!gpu->iommu) {
		dev_err(drm->dev, "failed to allocate IOMMU\n");
		ret = -ENOMEM;
		goto fail;
	}
	gpu->id = msm_register_iommu(drm, gpu->iommu);

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
		goto fail;
	}

	bs_init(gpu, pdev);

	return 0;

fail:
	return ret;
}

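/*
 * Teardown is the mirror image of init: by this point all submitted work
 * should have retired (active_list empty), so we just release the
 * bus-scaling client, the ringbuffer mapping, and the IOMMU domain.
 */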
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->iommu)
		iommu_domain_free(gpu->iommu);
}