// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
 */

#include "adreno_gpu.h"

bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);

bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);

bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);

int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=enable, 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);

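/*
 * Assuming the driver is built as the usual msm module, these parameters
 * appear at runtime under /sys/module/msm/parameters/ and can be set on
 * the kernel command line, e.g. msm.hang_debug=1.
 */
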
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
extern const struct adreno_gpulist a5xx_gpulist;
extern const struct adreno_gpulist a6xx_gpulist;
extern const struct adreno_gpulist a7xx_gpulist;

static const struct adreno_gpulist *gpulists[] = {
	&a2xx_gpulist,
	&a3xx_gpulist,
	&a4xx_gpulist,
	&a5xx_gpulist,
	&a6xx_gpulist,
	&a7xx_gpulist,
};

static const struct adreno_info *adreno_info(uint32_t chip_id)
{
	/* identify gpu: */
	for (int i = 0; i < ARRAY_SIZE(gpulists); i++) {
		for (int j = 0; j < gpulists[i]->gpus_count; j++) {
			const struct adreno_info *info = &gpulists[i]->gpus[j];

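			/*
			 * Some entries are restricted to a specific machine;
			 * skip them unless the DT root compatible matches.
			 */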
			if (info->machine && !of_machine_is_compatible(info->machine))
				continue;

			for (int k = 0; info->chip_ids[k]; k++)
				if (info->chip_ids[k] == chip_id)
					return info;
		}
	}

	return NULL;
}

struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct msm_gpu *gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	int ret;

	if (pdev)
		gpu = dev_to_gpu(&pdev->dev);

	if (!gpu) {
		dev_err_once(dev->dev, "no GPU device was found\n");
		return NULL;
	}

	adreno_gpu = to_adreno_gpu(gpu);

	/*
	 * The most common reason for HW init to fail is that the firmware
	 * isn't loaded yet, so try that first and don't bother continuing
	 * otherwise.
	 */

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return NULL;

	if (gpu->funcs->ucode_load) {
		ret = gpu->funcs->ucode_load(gpu);
		if (ret)
			return NULL;
	}

	/*
	 * Now that we have firmware loaded, and are ready to begin
	 * booting the gpu, go ahead and enable runpm:
	 */
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
		goto err_disable_rpm;
	}

	mutex_lock(&gpu->lock);
	ret = msm_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		goto err_put_rpm;
	}

	pm_runtime_put_autosuspend(&pdev->dev);

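	/*
	 * Expose the GPU's debugfs entries on both the primary and render
	 * DRM minors (debugfs builds only).
	 */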
#ifdef CONFIG_DEBUG_FS
	if (gpu->funcs->debugfs_init) {
		gpu->funcs->debugfs_init(gpu, dev->primary);
		gpu->funcs->debugfs_init(gpu, dev->render);
	}
#endif

	return gpu;

err_put_rpm:
	pm_runtime_put_sync_suspend(&pdev->dev);
err_disable_rpm:
	pm_runtime_disable(&pdev->dev);

	return NULL;
}

static int find_chipid(struct device *dev, uint32_t *chipid)
{
	struct device_node *node = dev->of_node;
	const char *compat;
	int ret;

	/* first search the compat strings for qcom,adreno-XYZ.W: */
	ret = of_property_read_string_index(node, "compatible", 0, &compat);
	if (ret == 0) {
		unsigned int r, patch;

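		/*
		 * For example, a compatible of "qcom,adreno-630.2" parses
		 * as r=630, patch=2, which encodes to chip id 0x06030002
		 * (core 6, major 3, minor 0, patch 2).
		 */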
		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
		    sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
			uint32_t core, major, minor;

			core = r / 100;
			r %= 100;
			major = r / 10;
			r %= 10;
			minor = r;

			*chipid = (core << 24) |
				(major << 16) |
				(minor << 8) |
				patch;

			return 0;
		}

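		/*
		 * Newer DTs can also spell out the full 32-bit chip id
		 * directly, e.g. "qcom,adreno-43050a01" (an a7xx-era
		 * example) parses as chip id 0x43050a01.
		 */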
		if (sscanf(compat, "qcom,adreno-%08x", chipid) == 1)
			return 0;
	}

	/* and if that fails, fall back to legacy "qcom,chipid" property: */
	ret = of_property_read_u32(node, "qcom,chipid", chipid);
	if (ret) {
		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
		return ret;
	}

	dev_warn(dev, "Using legacy qcom,chipid binding!\n");

	return 0;
}

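/*
 * Component bind callback, invoked once the master msm DRM device and all
 * of its components are available; this is where the GPU object is
 * actually created.
 */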
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
	static struct adreno_platform_config config = {};
	const struct adreno_info *info;
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct drm_device *drm = priv->dev;
	struct msm_gpu *gpu;
	int ret;

	ret = find_chipid(dev, &config.chip_id);
	if (ret)
		return ret;

	dev->platform_data = &config;
	priv->gpu_pdev = to_platform_device(dev);

	info = adreno_info(config.chip_id);
	if (!info) {
		dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
			ADRENO_CHIPID_ARGS(config.chip_id));
		return -ENXIO;
	}

	config.info = info;

	DBG("Found GPU: %"ADRENO_CHIPID_FMT, ADRENO_CHIPID_ARGS(config.chip_id));

	priv->is_a2xx = info->family < ADRENO_3XX;
	priv->has_cached_coherent =
		!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);

	gpu = info->init(drm);
	if (IS_ERR(gpu)) {
		dev_warn(drm->dev, "failed to load adreno gpu\n");
		return PTR_ERR(gpu);
	}

	ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
	if (ret)
		return ret;

	return 0;
}

static int adreno_system_suspend(struct device *dev);
static void adreno_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct msm_gpu *gpu = dev_to_gpu(dev);

	if (pm_runtime_enabled(dev))
		WARN_ON_ONCE(adreno_system_suspend(dev));
	gpu->funcs->destroy(gpu);

	priv->gpu_pdev = NULL;
}

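/* The a3xx_ops name is historical; these ops are used for all generations. */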
static const struct component_ops a3xx_ops = {
	.bind   = adreno_bind,
	.unbind = adreno_unbind,
};

static void adreno_device_register_headless(void)
{
	/*
	 * On imx5 there is no top-level mdp/dpu node, so create a dummy
	 * device node for the driver in that case.
	 */
	struct platform_device_info dummy_info = {
		.parent = NULL,
		.name = "msm",
		.id = -1,
		.res = NULL,
		.num_res = 0,
		.data = NULL,
		.size_data = 0,
		.dma_mask = ~0,
	};
	platform_device_register_full(&dummy_info);
}

static int adreno_probe(struct platform_device *pdev)
{
	int ret;

	ret = component_add(&pdev->dev, &a3xx_ops);
	if (ret)
		return ret;

	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
		adreno_device_register_headless();

	return 0;
}

static void adreno_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &a3xx_ops);
}

static void adreno_shutdown(struct platform_device *pdev)
{
	WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	/* for compatibility with imx5 gpu: */
	{ .compatible = "amd,imageon" },
	/* for backwards compat w/ downstream kgsl DT files: */
	{ .compatible = "qcom,kgsl-3d0" },
	{}
};

static int adreno_runtime_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	return gpu->funcs->pm_resume(gpu);
}

static int adreno_runtime_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/*
	 * We should be holding a runpm ref, which will prevent
	 * runtime suspend.  In the system suspend path, we've
	 * already waited for active jobs to complete.
	 */
	WARN_ON_ONCE(gpu->active_submits);

	return gpu->funcs->pm_suspend(gpu);
}

static void suspend_scheduler(struct msm_gpu *gpu)
{
	int i;

	/*
	 * Shut down the scheduler before we force suspend, so that
	 * suspend isn't racing with scheduler kthread feeding us
	 * more work.
	 *
	 * Note, we just want to park the thread, and let any jobs
	 * that are already on the hw queue complete normally, as
	 * opposed to the drm_sched_stop() path used for handling
	 * faulting/timed-out jobs.  We can't really cancel any jobs
	 * already on the hw queue without racing with the GPU.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;

		drm_sched_wqueue_stop(sched);
	}
}

static void resume_scheduler(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;

		drm_sched_wqueue_start(sched);
	}
}

static int adreno_system_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	int remaining, ret;

	if (!gpu)
		return 0;

	suspend_scheduler(gpu);

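	/*
	 * Give already-queued submits up to 1s to retire before forcing
	 * the suspend.
	 */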
	remaining = wait_event_timeout(gpu->retire_event,
				       gpu->active_submits == 0,
				       msecs_to_jiffies(1000));
	if (remaining == 0) {
		dev_err(dev, "Timeout waiting for GPU to suspend\n");
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_force_suspend(dev);
out:
	if (ret)
		resume_scheduler(gpu);

	return ret;
}

static int adreno_system_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	if (!gpu)
		return 0;

	resume_scheduler(gpu);
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops adreno_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};

static struct platform_driver adreno_driver = {
	.probe = adreno_probe,
	.remove_new = adreno_remove,
	.shutdown = adreno_shutdown,
	.driver = {
		.name = "adreno",
		.of_match_table = dt_match,
		.pm = &adreno_pm_ops,
	},
};

void __init adreno_register(void)
{
	platform_driver_register(&adreno_driver);
}

void __exit adreno_unregister(void)
{
	platform_driver_unregister(&adreno_driver);
}