1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013-2014 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 *
6 * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
7 */
8
9 #include "adreno_gpu.h"
10
/* Dump GPU registers when a hang is detected (shared by all generations) */
bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);

/* Include debugbus sections when capturing a GPU devcoredump */
bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);

/* Preemption control: 1 = force on, 0 = force off, -1 = per-GPU default */
int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);

/* Force-disable ACD; marked unsafe (taints the kernel) and read-only (0400) */
bool disable_acd;
MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
module_param_unsafe(disable_acd, bool, 0400);
26
/* Per-generation GPU catalog tables, each defined in its backend file */
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
extern const struct adreno_gpulist a5xx_gpulist;
extern const struct adreno_gpulist a6xx_gpulist;
extern const struct adreno_gpulist a7xx_gpulist;

/* All known generations, searched in order by adreno_info() */
static const struct adreno_gpulist *gpulists[] = {
	&a2xx_gpulist,
	&a3xx_gpulist,
	&a4xx_gpulist,
	&a5xx_gpulist,
	&a6xx_gpulist,
	&a7xx_gpulist,
};
42
/*
 * Look up the catalog entry for a given chip id, or return NULL if the
 * GPU revision is unknown.
 */
static const struct adreno_info *adreno_info(uint32_t chip_id)
{
	/* Walk every per-generation table looking for a matching chip id */
	for (int i = 0; i < ARRAY_SIZE(gpulists); i++) {
		const struct adreno_gpulist *list = gpulists[i];

		for (int j = 0; j < list->gpus_count; j++) {
			const struct adreno_info *info = &list->gpus[j];

			/* Machine-restricted entries only match that machine */
			if (info->machine && !of_machine_is_compatible(info->machine))
				continue;

			/* chip_ids is a zero-terminated list of ids */
			for (int k = 0; info->chip_ids[k] != 0; k++) {
				if (info->chip_ids[k] == chip_id)
					return info;
			}
		}
	}

	return NULL;
}
61
/**
 * adreno_load_gpu() - load firmware and bring the GPU up for first use
 * @dev: top-level DRM device
 *
 * Return: the msm_gpu on success, or NULL if no GPU device is bound,
 * firmware loading fails, or hardware init fails.
 */
struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct msm_gpu *gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	int ret;

	if (pdev)
		gpu = dev_to_gpu(&pdev->dev);

	if (!gpu) {
		dev_err_once(dev->dev, "no GPU device was found\n");
		return NULL;
	}

	adreno_gpu = to_adreno_gpu(gpu);

	/*
	 * The number one reason for HW init to fail is if the firmware isn't
	 * loaded yet. Try that first and don't bother continuing on
	 * otherwise
	 */

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return NULL;

	/* Optional per-generation hook to push ucode to the hardware */
	if (gpu->funcs->ucode_load) {
		ret = gpu->funcs->ucode_load(gpu);
		if (ret)
			return NULL;
	}

	/*
	 * Now that we have firmware loaded, and are ready to begin
	 * booting the gpu, go ahead and enable runpm:
	 */
	pm_runtime_enable(&pdev->dev);

	/* Hold a runpm ref across hw init; dropped once the GPU is up */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync failed but still took a usage count; balance it */
		pm_runtime_put_noidle(&pdev->dev);
		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
		goto err_disable_rpm;
	}

	mutex_lock(&gpu->lock);
	ret = msm_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		goto err_put_rpm;
	}

	pm_runtime_put_autosuspend(&pdev->dev);

#ifdef CONFIG_DEBUG_FS
	/* Expose debugfs entries on both the primary and render nodes */
	if (gpu->funcs->debugfs_init) {
		gpu->funcs->debugfs_init(gpu, dev->primary);
		gpu->funcs->debugfs_init(gpu, dev->render);
	}
#endif

	return gpu;

err_put_rpm:
	pm_runtime_put_sync_suspend(&pdev->dev);
err_disable_rpm:
	pm_runtime_disable(&pdev->dev);

	return NULL;
}
135
/*
 * Determine the chip id for a GPU device node, preferring the revision
 * encoded in the compatible string and falling back to the legacy
 * "qcom,chipid" property. Returns 0 on success, negative errno otherwise.
 */
static int find_chipid(struct device_node *node, uint32_t *chipid)
{
	const char *compat;
	int ret;

	/* first search the compat strings for qcom,adreno-XYZ.W: */
	ret = of_property_read_string_index(node, "compatible", 0, &compat);
	if (!ret) {
		unsigned int r, patch;

		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
		    sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
			/* Decimal digits of XYZ map to core/major/minor */
			uint32_t core = r / 100;
			uint32_t major = (r / 10) % 10;
			uint32_t minor = r % 10;

			*chipid = (core << 24) | (major << 16) |
				  (minor << 8) | patch;

			return 0;
		}

		/* Newer binding: full chip id spelled out in hex */
		if (sscanf(compat, "qcom,adreno-%08x", chipid) == 1)
			return 0;
	}

	/* and if that fails, fall back to legacy "qcom,chipid" property: */
	ret = of_property_read_u32(node, "qcom,chipid", chipid);
	if (ret) {
		DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n",
			  node, ret);
		return ret;
	}

	pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);

	return 0;
}
180
adreno_has_gpu(struct device_node * node)181 bool adreno_has_gpu(struct device_node *node)
182 {
183 const struct adreno_info *info;
184 uint32_t chip_id;
185 int ret;
186
187 ret = find_chipid(node, &chip_id);
188 if (ret)
189 return false;
190
191 info = adreno_info(chip_id);
192 if (!info) {
193 pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
194 node, ADRENO_CHIPID_ARGS(chip_id));
195 return false;
196 }
197
198 return true;
199 }
200
/* Component bind: identify the GPU, init the backend, wire up ICC paths */
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
	/* static: the pointer stored in dev->platform_data must stay valid */
	static struct adreno_platform_config config = {};
	const struct adreno_info *info;
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct drm_device *drm = priv->dev;
	struct msm_gpu *gpu;
	int ret;

	ret = find_chipid(dev->of_node, &config.chip_id);
	/* We shouldn't have gotten this far if we can't parse the chip_id */
	if (WARN_ON(ret))
		return ret;

	dev->platform_data = &config;
	priv->gpu_pdev = to_platform_device(dev);

	info = adreno_info(config.chip_id);
	/* We shouldn't have gotten this far if we don't recognize the GPU: */
	if (WARN_ON(!info))
		return -ENXIO;

	config.info = info;

	DBG("Found GPU: %"ADRENO_CHIPID_FMT, ADRENO_CHIPID_ARGS(config.chip_id));

	priv->is_a2xx = info->family < ADRENO_3XX;
	priv->has_cached_coherent =
		!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);

	/* Hand off to the generation-specific constructor from the catalog */
	gpu = info->init(drm);
	if (IS_ERR(gpu)) {
		dev_warn(drm->dev, "failed to load adreno gpu\n");
		return PTR_ERR(gpu);
	}

	ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
	if (ret)
		return ret;

	return 0;
}
243
static int adreno_system_suspend(struct device *dev);
/* Component unbind: force-suspend the GPU, then destroy the backend */
static void adreno_unbind(struct device *dev, struct device *master,
			  void *data)
{
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* Only suspend if runpm was actually enabled (i.e. load succeeded) */
	if (pm_runtime_enabled(dev))
		WARN_ON_ONCE(adreno_system_suspend(dev));
	gpu->funcs->destroy(gpu);

	priv->gpu_pdev = NULL;
}
257
/*
 * Component ops used for every device matched by this driver,
 * all generations included (the a3xx name notwithstanding).
 */
static const struct component_ops a3xx_ops = {
	.bind = adreno_bind,
	.unbind = adreno_unbind,
};
262
adreno_probe(struct platform_device * pdev)263 static int adreno_probe(struct platform_device *pdev)
264 {
265 if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon") ||
266 msm_gpu_no_components())
267 return msm_gpu_probe(pdev, &a3xx_ops);
268
269 return component_add(&pdev->dev, &a3xx_ops);
270 }
271
adreno_remove(struct platform_device * pdev)272 static void adreno_remove(struct platform_device *pdev)
273 {
274 struct msm_drm_private *priv = platform_get_drvdata(pdev);
275
276 if (priv->kms_init)
277 component_del(&pdev->dev, &a3xx_ops);
278 else
279 msm_gpu_remove(pdev, &a3xx_ops);
280 }
281
/* Quiesce and suspend the GPU on system shutdown/reboot */
static void adreno_shutdown(struct platform_device *pdev)
{
	WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}
286
/* Device-tree compatibles handled by this driver */
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	/* for compatibility with imx5 gpu: */
	{ .compatible = "amd,imageon" },
	/* for backwards compat w/ downstream kgsl DT files: */
	{ .compatible = "qcom,kgsl-3d0" },
	{}
};
296
adreno_runtime_resume(struct device * dev)297 static int adreno_runtime_resume(struct device *dev)
298 {
299 struct msm_gpu *gpu = dev_to_gpu(dev);
300
301 return gpu->funcs->pm_resume(gpu);
302 }
303
/* Runtime PM suspend: power down via the generation-specific hook */
static int adreno_runtime_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/*
	 * We should be holding a runpm ref, which will prevent
	 * runtime suspend. In the system suspend path, we've
	 * already waited for active jobs to complete.
	 */
	WARN_ON_ONCE(gpu->active_submits);

	return gpu->funcs->pm_suspend(gpu);
}
317
suspend_scheduler(struct msm_gpu * gpu)318 static void suspend_scheduler(struct msm_gpu *gpu)
319 {
320 int i;
321
322 /*
323 * Shut down the scheduler before we force suspend, so that
324 * suspend isn't racing with scheduler kthread feeding us
325 * more work.
326 *
327 * Note, we just want to park the thread, and let any jobs
328 * that are already on the hw queue complete normally, as
329 * opposed to the drm_sched_stop() path used for handling
330 * faulting/timed-out jobs. We can't really cancel any jobs
331 * already on the hw queue without racing with the GPU.
332 */
333 for (i = 0; i < gpu->nr_rings; i++) {
334 struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
335
336 drm_sched_wqueue_stop(sched);
337 }
338 }
339
resume_scheduler(struct msm_gpu * gpu)340 static void resume_scheduler(struct msm_gpu *gpu)
341 {
342 int i;
343
344 for (i = 0; i < gpu->nr_rings; i++) {
345 struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
346
347 drm_sched_wqueue_start(sched);
348 }
349 }
350
/*
 * System suspend: park the schedulers, wait for in-flight jobs to retire,
 * then force runtime-suspend. On any failure the schedulers are restarted
 * so the GPU remains usable.
 */
static int adreno_system_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	int remaining, ret;

	/* GPU not bound (or already torn down): nothing to do */
	if (!gpu)
		return 0;

	suspend_scheduler(gpu);

	/* Give active submits up to a second to retire */
	remaining = wait_event_timeout(gpu->retire_event,
				       gpu->active_submits == 0,
				       msecs_to_jiffies(1000));
	if (remaining == 0) {
		dev_err(dev, "Timeout waiting for GPU to suspend\n");
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_force_suspend(dev);
out:
	if (ret)
		resume_scheduler(gpu);

	return ret;
}
377
/* System resume: restart the schedulers, then power the GPU back up */
static int adreno_system_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* Nothing was suspended if no GPU is bound */
	if (!gpu)
		return 0;

	resume_scheduler(gpu);

	return pm_runtime_force_resume(dev);
}
388
/* System-sleep and runtime PM callbacks for the GPU platform device */
static const struct dev_pm_ops adreno_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
393
/* Platform driver matching the compatibles listed in dt_match[] */
static struct platform_driver adreno_driver = {
	.probe = adreno_probe,
	.remove = adreno_remove,
	.shutdown = adreno_shutdown,
	.driver = {
		.name = "adreno",
		.of_match_table = dt_match,
		.pm = &adreno_pm_ops,
	},
};
404
/* Register the adreno platform driver (called at module init) */
void __init adreno_register(void)
{
	platform_driver_register(&adreno_driver);
}
409
/* Unregister the adreno platform driver (called at module exit) */
void __exit adreno_unregister(void)
{
	platform_driver_unregister(&adreno_driver);
}
414