1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * Copyright (c) 2014 The Linux Foundation. All rights reserved.
7  */
8 
9 #include <linux/ascii85.h>
10 #include <linux/interconnect.h>
11 #include <linux/firmware/qcom/qcom_scm.h>
12 #include <linux/kernel.h>
13 #include <linux/of_reserved_mem.h>
14 #include <linux/pm_opp.h>
15 #include <linux/slab.h>
16 #include <linux/soc/qcom/mdt_loader.h>
17 #include <linux/nvmem-consumer.h>
18 #include <soc/qcom/ocmem.h>
19 #include "adreno_gpu.h"
20 #include "a6xx_gpu.h"
21 #include "msm_gem.h"
22 #include "msm_mmu.h"
23 
24 static u64 address_space_size;
25 MODULE_PARM_DESC(address_space_size, "Override for the size of a process's private GPU address space");
26 module_param(address_space_size, ullong, 0600);
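
/*
 * Example (illustrative, not part of the driver): this file is built into
 * msm.ko, so assuming the usual module name the override can be given at
 * module load time or, since the parameter is 0600, poked at runtime:
 *
 *	modprobe msm address_space_size=0x100000000
 *	echo 0x100000000 > /sys/module/msm/parameters/address_space_size
 */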
27 
28 static bool zap_available = true;
29 
30 static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
31 		u32 pasid)
32 {
33 	struct device *dev = &gpu->pdev->dev;
34 	const struct firmware *fw;
35 	const char *signed_fwname = NULL;
36 	struct device_node *np;
37 	struct resource r;
38 	phys_addr_t mem_phys;
39 	ssize_t mem_size;
40 	void *mem_region = NULL;
41 	int ret;
42 
43 	if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
44 		zap_available = false;
45 		return -EINVAL;
46 	}
47 
48 	np = of_get_available_child_by_name(dev->of_node, "zap-shader");
49 	if (!np) {
50 		zap_available = false;
51 		return -ENODEV;
52 	}
53 
54 	ret = of_reserved_mem_region_to_resource(np, 0, &r);
55 	if (ret) {
56 		zap_available = false;
57 		return ret;
58 	}
59 	mem_phys = r.start;
60 
61 	/*
62 	 * Check for a firmware-name property.  This is the new scheme
63 	 * to handle firmware that may be signed with device specific
64 	 * keys, allowing us to have a different zap fw path for different
65 	 * devices.
66 	 *
67 	 * If the firmware-name property is found, we bypass the
68 	 * adreno_request_fw() mechanism, because we don't need to handle
69 	 * the /lib/firmware/qcom/... vs /lib/firmware/... case.
70 	 *
71 	 * If the firmware-name property is not found, for backwards
72 	 * compatibility we fall back to the fwname from the gpulist
73 	 * table.
74 	 */
75 	of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
76 	if (signed_fwname) {
77 		fwname = signed_fwname;
78 		ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
79 		if (ret)
80 			fw = ERR_PTR(ret);
81 	} else if (fwname) {
82 		/* Request the MDT file from the default location: */
83 		fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
84 	} else {
85 		/*
86 		 * For new targets, we require the firmware-name property,
87 		 * if a zap-shader is required, rather than falling back
88 		 * to a firmware name specified in gpulist.
89 		 *
90 		 * Because the firmware is signed with a (potentially)
91 		 * device specific key, having the name come from gpulist
92 		 * was a bad idea, and is only provided for backwards
93 		 * compatibility for older targets.
94 		 */
95 		return -ENOENT;
96 	}
97 
98 	if (IS_ERR(fw)) {
99 		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
100 		return PTR_ERR(fw);
101 	}
102 
103 	/* Figure out how much memory we need */
104 	mem_size = qcom_mdt_get_size(fw);
105 	if (mem_size < 0) {
106 		ret = mem_size;
107 		goto out;
108 	}
109 
110 	if (mem_size > resource_size(&r)) {
111 		DRM_DEV_ERROR(dev,
112 			"memory region is too small to load the MDT\n");
113 		ret = -E2BIG;
114 		goto out;
115 	}
116 
117 	/* Allocate memory for the firmware image */
118 	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
119 	if (!mem_region) {
120 		ret = -ENOMEM;
121 		goto out;
122 	}
123 
124 	/*
125 	 * Load the rest of the MDT
126 	 *
127 	 * Note that we could be dealing with two different paths, since
128 	 * with upstream linux-firmware it would be in a qcom/ subdir..
129 	 * adreno_request_fw() handles this, but qcom_mdt_load() does
130 	 * not.  But since we've already gotten through adreno_request_fw()
131 	 * we know which of the two cases it is:
132 	 */
133 	if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
134 		ret = qcom_mdt_load(dev, fw, fwname, pasid,
135 				mem_region, mem_phys, mem_size, NULL);
136 	} else {
137 		char *newname;
138 
139 		newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
140 
141 		ret = qcom_mdt_load(dev, fw, newname, pasid,
142 				mem_region, mem_phys, mem_size, NULL);
143 		kfree(newname);
144 	}
145 	if (ret)
146 		goto out;
147 
148 	/* Send the image to the secure world */
149 	ret = qcom_scm_pas_auth_and_reset(pasid);
150 
151 	/*
152 	 * If the scm call returns -EOPNOTSUPP we assume that this target
153 	 * doesn't need/support the zap shader, so quietly fail
154 	 */
155 	if (ret == -EOPNOTSUPP)
156 		zap_available = false;
157 	else if (ret)
158 		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
159 
160 out:
161 	if (mem_region)
162 		memunmap(mem_region);
163 
164 	release_firmware(fw);
165 
166 	return ret;
167 }
168 
169 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
170 {
171 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
172 	struct platform_device *pdev = gpu->pdev;
173 
174 	/* Shortcut if we determine the zap shader isn't available/needed */
175 	if (!zap_available)
176 		return -ENODEV;
177 
178 	/* We need SCM to be able to load the firmware */
179 	if (!qcom_scm_is_available()) {
180 		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
181 		return -EPROBE_DEFER;
182 	}
183 
184 	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
185 }
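
/*
 * Sketch of a caller: a GPU-specific hw_init path would typically invoke
 * this with its PAS id (e.g. the GPU_PAS_ID define the a5xx/a6xx code uses)
 * and tolerate targets that have no zap shader at all.  The error handling
 * below is illustrative, not the exact per-GPU code:
 *
 *	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
 *	if (ret == -ENODEV)
 *		ret = 0;	// no zap shader needed on this target
 *	else if (ret)
 *		return ret;	// including -EPROBE_DEFER when SCM isn't up yet
 */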
186 
187 struct drm_gpuvm *
188 adreno_create_vm(struct msm_gpu *gpu,
189 		 struct platform_device *pdev)
190 {
191 	return adreno_iommu_create_vm(gpu, pdev, 0);
192 }
193 
194 struct drm_gpuvm *
195 adreno_iommu_create_vm(struct msm_gpu *gpu,
196 		       struct platform_device *pdev,
197 		       unsigned long quirks)
198 {
199 	struct iommu_domain_geometry *geometry;
200 	struct msm_mmu *mmu;
201 	struct drm_gpuvm *vm;
202 	u64 start, size;
203 
204 	mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
205 	if (IS_ERR(mmu))
206 		return ERR_CAST(mmu);
207 
208 	geometry = msm_iommu_get_geometry(mmu);
209 	if (IS_ERR(geometry))
210 		return ERR_CAST(geometry);
211 
212 	/*
213 	 * Use the aperture start or SZ_16M, whichever is greater. This will
214 	 * ensure that we align with the allocated pagetable range while still
215 	 * allowing room in the lower 32 bits for GMEM and whatnot
216 	 */
217 	start = max_t(u64, SZ_16M, geometry->aperture_start);
218 	size = geometry->aperture_end - start + 1;
219 
220 	vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
221 			       size, true);
222 
223 	if (IS_ERR(vm) && !IS_ERR(mmu))
224 		mmu->funcs->destroy(mmu);
225 
226 	return vm;
227 }
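
/*
 * Worked example with a hypothetical 48-bit aperture: for
 * aperture_start = 0 and aperture_end = 0xffffffffffff, the code above
 * picks start = SZ_16M and size = 0x1000000000000 - SZ_16M, keeping the
 * VM inside the allocated pagetable range while leaving the first 16MB
 * unused.
 */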
228 
229 u64 adreno_private_vm_size(struct msm_gpu *gpu)
230 {
231 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
232 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
233 	const struct io_pgtable_cfg *ttbr1_cfg;
234 
235 	if (address_space_size)
236 		return address_space_size;
237 
238 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
239 		return SZ_4G;
240 
241 	if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
242 		return SZ_4G;
243 
244 	ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
245 
246 	/*
247 	 * Userspace VM is actually using TTBR0, but both are the same size,
248 	 * with b48 (sign bit) selecting which TTBRn to use.  So if IAS is
249 	 * 48, the total (kernel+user) address space size is effectively
250 	 * 49 bits.  But what userspace is in control of is the lower 48.
251 	 */
252 	return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
253 }
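
/*
 * Worked example: with ias == 48 this returns 2^48 - ADRENO_VM_START,
 * i.e. the full TTBR0 half of the address space minus the low region
 * below ADRENO_VM_START that is reserved for the kernel-managed VM.
 */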
254 
255 void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
256 {
257 	struct msm_gpu *gpu = &adreno_gpu->base;
258 	struct msm_drm_private *priv = gpu->dev->dev_private;
259 	unsigned long flags;
260 
261 	/*
262 	 * Wait until the cooldown period has passed and we would actually
263 	 * collect a crashdump to re-enable stall-on-fault.
264 	 */
265 	spin_lock_irqsave(&priv->fault_stall_lock, flags);
266 	if (!priv->stall_enabled &&
267 			ktime_after(ktime_get(), priv->stall_reenable_time) &&
268 			!READ_ONCE(gpu->crashstate)) {
269 		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
270 
271 		priv->stall_enabled = true;
272 
273 		mmu->funcs->set_stall(mmu, true);
274 	}
275 	spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
276 }
277 
278 #define ARM_SMMU_FSR_TF                 BIT(1)
279 #define ARM_SMMU_FSR_PF			BIT(3)
280 #define ARM_SMMU_FSR_EF			BIT(4)
281 #define ARM_SMMU_FSR_SS			BIT(30)
282 
283 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
284 			 struct adreno_smmu_fault_info *info, const char *block,
285 			 u32 scratch[4])
286 {
287 	struct adreno_gpu *adreno_gpu = container_of(gpu, struct adreno_gpu, base);
288 	struct msm_drm_private *priv = gpu->dev->dev_private;
289 	struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
290 	const char *type = "UNKNOWN";
291 	bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
292 		!READ_ONCE(gpu->crashstate);
293 	unsigned long irq_flags;
294 
295 	/*
296 	 * In case there is a subsequent storm of pagefaults, disable
297 	 * stall-on-fault for at least half a second.
298 	 */
299 	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
300 	if (priv->stall_enabled) {
301 		priv->stall_enabled = false;
302 
303 		mmu->funcs->set_stall(mmu, false);
304 	}
305 
306 	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
307 	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
308 
309 	/*
310 	 * Print a default message if we couldn't get the data from the
311 	 * adreno-smmu-priv
312 	 */
313 	if (!info) {
314 		pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
315 			iova, flags,
316 			scratch[0], scratch[1], scratch[2], scratch[3]);
317 
318 		return 0;
319 	}
320 
321 	if (info->fsr & ARM_SMMU_FSR_TF)
322 		type = "TRANSLATION";
323 	else if (info->fsr & ARM_SMMU_FSR_PF)
324 		type = "PERMISSION";
325 	else if (info->fsr & ARM_SMMU_FSR_EF)
326 		type = "EXTERNAL";
327 
328 	pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
329 			info->ttbr0, iova,
330 			flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
331 			type, block,
332 			scratch[0], scratch[1], scratch[2], scratch[3]);
333 
334 	if (do_devcoredump) {
335 		struct msm_gpu_fault_info fault_info = {};
336 
337 		/* Turn off the hangcheck timer to keep it from bothering us */
338 		timer_delete(&gpu->hangcheck_timer);
339 
340 		/* Let any concurrent GMU transactions know that the MMU may be
341 		 * blocked for a while and they should wait on us.
342 		 */
343 		reinit_completion(&adreno_gpu->fault_coredump_done);
344 
345 		fault_info.ttbr0 = info->ttbr0;
346 		fault_info.iova  = iova;
347 		fault_info.flags = flags;
348 		fault_info.type  = type;
349 		fault_info.block = block;
350 
351 		msm_gpu_fault_crashstate_capture(gpu, &fault_info);
352 
353 		complete_all(&adreno_gpu->fault_coredump_done);
354 	}
355 
356 	return 0;
357 }
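
/*
 * Sketch of how a per-GPU fault callback funnels into the common handler
 * above; the signature follows adreno_fault_handler(), but the callback
 * name and the scratch/block decode are illustrative, not the exact a6xx
 * code:
 *
 *	static int gpuN_fault_handler(void *arg, unsigned long iova,
 *				      int flags, void *data)
 *	{
 *		struct msm_gpu *gpu = arg;
 *		struct adreno_smmu_fault_info *info = data;
 *		const char *block = "unknown";
 *		u32 scratch[4] = {};	// filled from GPU scratch registers
 *
 *		return adreno_fault_handler(gpu, iova, flags, info, block,
 *					    scratch);
 *	}
 */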
358 
359 int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
360 		     uint32_t param, uint64_t *value, uint32_t *len)
361 {
362 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
363 	struct drm_device *drm = gpu->dev;
364 	/* Note ctx can be NULL when called from rd_open(): */
365 	struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
366 
367 	/* No pointer params yet */
368 	if (*len != 0)
369 		return UERR(EINVAL, drm, "invalid len");
370 
371 	switch (param) {
372 	case MSM_PARAM_GPU_ID:
373 		*value = adreno_gpu->info->revn;
374 		return 0;
375 	case MSM_PARAM_GMEM_SIZE:
376 		*value = adreno_gpu->info->gmem;
377 		return 0;
378 	case MSM_PARAM_GMEM_BASE:
379 		if (adreno_gpu->info->family >= ADRENO_6XX_GEN4)
380 			*value = 0;
381 		else
382 			*value = 0x100000;
383 		return 0;
384 	case MSM_PARAM_CHIP_ID:
385 		*value = adreno_gpu->chip_id;
386 		if (!adreno_gpu->info->revn)
387 			*value |= ((uint64_t) adreno_gpu->speedbin) << 32;
388 		return 0;
389 	case MSM_PARAM_MAX_FREQ:
390 		*value = adreno_gpu->base.fast_rate;
391 		return 0;
392 	case MSM_PARAM_TIMESTAMP:
393 		if (adreno_gpu->funcs->get_timestamp) {
394 			pm_runtime_get_sync(&gpu->pdev->dev);
395 			*value = adreno_gpu->funcs->get_timestamp(gpu);
396 			pm_runtime_put_autosuspend(&gpu->pdev->dev);
397 
398 			return 0;
399 		}
400 		return -EINVAL;
401 	case MSM_PARAM_PRIORITIES:
402 		*value = gpu->nr_rings * NR_SCHED_PRIORITIES;
403 		return 0;
404 	case MSM_PARAM_PP_PGTABLE:
405 		*value = 0;
406 		return 0;
407 	case MSM_PARAM_FAULTS:
408 		if (vm)
409 			*value = gpu->global_faults + to_msm_vm(vm)->faults;
410 		else
411 			*value = gpu->global_faults;
412 		return 0;
413 	case MSM_PARAM_SUSPENDS:
414 		*value = gpu->suspend_count;
415 		return 0;
416 	case MSM_PARAM_VA_START:
417 		if (vm == gpu->vm)
418 			return UERR(EINVAL, drm, "requires per-process pgtables");
419 		*value = vm->mm_start;
420 		return 0;
421 	case MSM_PARAM_VA_SIZE:
422 		if (vm == gpu->vm)
423 			return UERR(EINVAL, drm, "requires per-process pgtables");
424 		*value = vm->mm_range;
425 		return 0;
426 	case MSM_PARAM_HIGHEST_BANK_BIT:
427 		*value = adreno_gpu->ubwc_config->highest_bank_bit;
428 		return 0;
429 	case MSM_PARAM_RAYTRACING:
430 		*value = adreno_gpu->has_ray_tracing;
431 		return 0;
432 	case MSM_PARAM_UBWC_SWIZZLE:
433 		*value = adreno_gpu->ubwc_config->ubwc_swizzle;
434 		return 0;
435 	case MSM_PARAM_MACROTILE_MODE:
436 		*value = adreno_gpu->ubwc_config->macrotile_mode;
437 		return 0;
438 	case MSM_PARAM_UCHE_TRAP_BASE:
439 		*value = adreno_gpu->uche_trap_base;
440 		return 0;
441 	case MSM_PARAM_HAS_PRR:
442 		*value = adreno_smmu_has_prr(gpu);
443 		return 0;
444 	case MSM_PARAM_AQE:
445 		*value = !!(adreno_gpu->funcs->aqe_is_enabled &&
446 			    adreno_gpu->funcs->aqe_is_enabled(adreno_gpu));
447 		return 0;
448 	default:
449 		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
450 	}
451 }
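
/*
 * Userspace view (sketch): the scalar params above are read with
 * DRM_IOCTL_MSM_GET_PARAM and len == 0, e.g.:
 *
 *	struct drm_msm_param req = {
 *		.pipe = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_CHIP_ID,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req);
 *	// on success req.value holds the chip id, with the speedbin in
 *	// bits 63..32 when revn is 0 (see above)
 */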
452 
453 int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
454 		     uint32_t param, uint64_t value, uint32_t len)
455 {
456 	struct drm_device *drm = gpu->dev;
457 
458 	switch (param) {
459 	case MSM_PARAM_COMM:
460 	case MSM_PARAM_CMDLINE:
461 		/* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
462 		 * that should be a reasonable upper bound
463 		 */
464 		if (len > PAGE_SIZE)
465 			return UERR(EINVAL, drm, "invalid len");
466 		break;
467 	default:
468 		if (len != 0)
469 			return UERR(EINVAL, drm, "invalid len");
470 	}
471 
472 	switch (param) {
473 	case MSM_PARAM_COMM:
474 	case MSM_PARAM_CMDLINE: {
475 		char *str, **paramp;
476 
477 		str = memdup_user_nul(u64_to_user_ptr(value), len);
478 		if (IS_ERR(str))
479 			return PTR_ERR(str);
480 
481 		mutex_lock(&gpu->lock);
482 
483 		if (param == MSM_PARAM_COMM) {
484 			paramp = &ctx->comm;
485 		} else {
486 			paramp = &ctx->cmdline;
487 		}
488 
489 		kfree(*paramp);
490 		*paramp = str;
491 
492 		mutex_unlock(&gpu->lock);
493 
494 		return 0;
495 	}
496 	case MSM_PARAM_SYSPROF:
497 		if (!capable(CAP_SYS_ADMIN))
498 			return UERR(EPERM, drm, "invalid permissions");
499 		return msm_context_set_sysprof(ctx, gpu, value);
500 	case MSM_PARAM_EN_VM_BIND:
501 		/* We can only support VM_BIND with per-process pgtables: */
502 		if (ctx->vm == gpu->vm)
503 			return UERR(EINVAL, drm, "requires per-process pgtables");
504 
505 		/*
506 		 * We can only switch to VM_BIND mode if the VM has not yet
507 		 * been created:
508 		 */
509 		if (ctx->vm)
510 			return UERR(EBUSY, drm, "VM already created");
511 
512 		ctx->userspace_managed_vm = value;
513 
514 		return 0;
515 	default:
516 		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
517 	}
518 }
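
/*
 * Sketch of the string-param case from userspace: value carries a user
 * pointer and len the string length (memdup_user_nul() appends the NUL),
 * e.g. to label a context in crash dumps:
 *
 *	struct drm_msm_param req = {
 *		.pipe = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_COMM,
 *		.value = (uintptr_t)comm,
 *		.len = strlen(comm),
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
 */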
519 
520 const struct firmware *
521 adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
522 {
523 	struct drm_device *drm = adreno_gpu->base.dev;
524 	const struct firmware *fw = NULL;
525 	char *newname;
526 	int ret;
527 
528 	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
529 	if (!newname)
530 		return ERR_PTR(-ENOMEM);
531 
532 	/*
533 	 * Try first to load from qcom/$fwfile using a direct load (to avoid
534 	 * a potential timeout waiting for usermode helper)
535 	 */
536 	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
537 	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
538 
539 		ret = request_firmware_direct(&fw, newname, drm->dev);
540 		if (!ret) {
541 			DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
542 				newname);
543 			adreno_gpu->fwloc = FW_LOCATION_NEW;
544 			goto out;
545 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
546 			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
547 				newname, ret);
548 			fw = ERR_PTR(ret);
549 			goto out;
550 		}
551 	}
552 
553 	/*
554 	 * Then try the legacy location without qcom/ prefix
555 	 */
556 	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
557 	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
558 
559 		ret = request_firmware_direct(&fw, fwname, drm->dev);
560 		if (!ret) {
561 			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
562 				fwname);
563 			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
564 			goto out;
565 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
566 			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
567 				fwname, ret);
568 			fw = ERR_PTR(ret);
569 			goto out;
570 		}
571 	}
572 
573 	/*
574 	 * Finally fall back to request_firmware() for cases where the
575 	 * usermode helper is needed (I think mainly android)
576 	 */
577 	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
578 	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
579 
580 		ret = request_firmware(&fw, newname, drm->dev);
581 		if (!ret) {
582 			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
583 				newname);
584 			adreno_gpu->fwloc = FW_LOCATION_HELPER;
585 			goto out;
586 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
587 			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
588 				newname, ret);
589 			fw = ERR_PTR(ret);
590 			goto out;
591 		}
592 	}
593 
594 	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
595 	fw = ERR_PTR(-ENOENT);
596 out:
597 	kfree(newname);
598 	return fw;
599 }
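
/*
 * Example resolution order for fwname = "a630_gmu.bin" (relative to the
 * firmware search path, typically /lib/firmware):
 *
 *	1. qcom/a630_gmu.bin  via request_firmware_direct()  -> FW_LOCATION_NEW
 *	2. a630_gmu.bin       via request_firmware_direct()  -> FW_LOCATION_LEGACY
 *	3. qcom/a630_gmu.bin  via request_firmware(), i.e. with the usermode
 *	                      helper fallback                -> FW_LOCATION_HELPER
 *
 * Whichever location succeeds first is cached in adreno_gpu->fwloc, so
 * later requests skip straight to it.
 */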
600 
601 int adreno_load_fw(struct adreno_gpu *adreno_gpu)
602 {
603 	int i;
604 
605 	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
606 		const struct firmware *fw;
607 
608 		if (!adreno_gpu->info->fw[i])
609 			continue;
610 
611 		/* Skip loading GMU firmware with GMU Wrapper */
612 		if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
613 			continue;
614 
615 		/* Skip if the firmware has already been loaded */
616 		if (adreno_gpu->fw[i])
617 			continue;
618 
619 		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
620 		if (IS_ERR(fw))
621 			return PTR_ERR(fw);
622 
623 		adreno_gpu->fw[i] = fw;
624 	}
625 
626 	return 0;
627 }
628 
629 struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
630 		const struct firmware *fw, u64 *iova)
631 {
632 	struct drm_gem_object *bo;
633 	void *ptr;
634 
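	/* The first dword of the fw image is a version header, not payload,
	 * so it is not copied into the GPU buffer:
	 */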
635 	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
636 		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
637 
638 	if (IS_ERR(ptr))
639 		return ERR_CAST(ptr);
640 
641 	memcpy(ptr, &fw->data[4], fw->size - 4);
642 
643 	msm_gem_put_vaddr(bo);
644 
645 	return bo;
646 }
647 
648 int adreno_hw_init(struct msm_gpu *gpu)
649 {
650 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
651 	int ret;
652 
653 	VERB("%s", gpu->name);
654 
655 	if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 &&
656 	    qcom_scm_set_gpu_smmu_aperture_is_available()) {
657 		/* We currently always use context bank 0, so hard code this */
658 		ret = qcom_scm_set_gpu_smmu_aperture(0);
659 		if (ret)
660 			DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret);
661 	}
662 
663 	for (int i = 0; i < gpu->nr_rings; i++) {
664 		struct msm_ringbuffer *ring = gpu->rb[i];
665 
666 		if (!ring)
667 			continue;
668 
669 		ring->cur = ring->start;
670 		ring->next = ring->start;
671 		ring->memptrs->rptr = 0;
672 		ring->memptrs->bv_fence = ring->fctx->completed_fence;
673 
674 		/* Detect and clean up an impossible fence, i.e. if the GPU managed
675 		 * to scribble something invalid, we don't want that to confuse
676 		 * us into mistakenly believing that submits have completed.
677 		 */
678 		if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
679 			ring->memptrs->fence = ring->fctx->last_fence;
680 		}
681 	}
682 
683 	return 0;
684 }
685 
686 /* Use this helper to read rptr, since a430 doesn't update rptr in memory */
687 static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
688 		struct msm_ringbuffer *ring)
689 {
690 	struct msm_gpu *gpu = &adreno_gpu->base;
691 
692 	return gpu->funcs->get_rptr(gpu, ring);
693 }
694 
695 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
696 {
697 	return gpu->rb[0];
698 }
699 
700 void adreno_recover(struct msm_gpu *gpu)
701 {
702 	struct drm_device *dev = gpu->dev;
703 	int ret;
704 
705 	// XXX pm-runtime??  we *need* the device to be off after this
706 	// so maybe continuing to call ->pm_suspend/resume() is better?
707 
708 	gpu->funcs->pm_suspend(gpu);
709 	gpu->funcs->pm_resume(gpu);
710 
711 	ret = msm_gpu_hw_init(gpu);
712 	if (ret) {
713 		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
714 		/* hmm, oh well? */
715 	}
716 }
717 
718 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
719 {
720 	uint32_t wptr;
721 
722 	/* Copy the shadow to the actual register */
723 	ring->cur = ring->next;
724 
725 	/*
726 	 * Mask wptr value that we calculate to fit in the HW range. This is
727 	 * to account for the possibility that the last command fit exactly into
728 	 * the ringbuffer and rb->next hasn't wrapped to zero yet
729 	 */
730 	wptr = get_wptr(ring);
731 
732 	/* ensure writes to ringbuffer have hit system memory: */
733 	mb();
734 
735 	gpu_write(gpu, reg, wptr);
736 }
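
/*
 * Worked example of the wrap case: with a ring of N dwords
 * (MSM_GPU_RINGBUFFER_SZ >> 2), a submit ending exactly at the end of the
 * buffer leaves ring->next == ring->start + N.  Assuming the usual
 * (cur - start) % N definition of get_wptr() in adreno_gpu.h, the modulo
 * folds that wptr to 0, which is what the hardware expects.
 */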
737 
738 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
739 {
740 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
741 	uint32_t wptr = get_wptr(ring);
742 
743 	/* wait for CP to drain ringbuffer: */
744 	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
745 		return true;
746 
747 	/* TODO maybe we need to reset GPU here to recover from hang? */
748 	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
749 		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
750 
751 	return false;
752 }
753 
754 int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
755 {
756 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
757 	int i, count = 0;
758 
759 	WARN_ON(!mutex_is_locked(&gpu->lock));
760 
761 	kref_init(&state->ref);
762 
763 	ktime_get_real_ts64(&state->time);
764 
765 	for (i = 0; i < gpu->nr_rings; i++) {
766 		int size = 0, j;
767 
768 		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
769 		state->ring[i].iova = gpu->rb[i]->iova;
770 		state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
771 		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
772 		state->ring[i].wptr = get_wptr(gpu->rb[i]);
773 
774 		/* Copy at least 'wptr' dwords of the data */
775 		size = state->ring[i].wptr;
776 
777 		/* After wptr, find the last non-zero dword to save space */
778 		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
779 			if (gpu->rb[i]->start[j])
780 				size = j + 1;
781 
782 		if (size) {
783 			state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
784 			if (state->ring[i].data)
785 				state->ring[i].data_size = size << 2;
786 		}
787 	}
788 
789 	/* Some targets prefer to collect their own registers */
790 	if (!adreno_gpu->registers)
791 		return 0;
792 
793 	/* Count the number of registers */
794 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
795 		count += adreno_gpu->registers[i + 1] -
796 			adreno_gpu->registers[i] + 1;
797 
798 	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
799 	if (state->registers) {
800 		int pos = 0;
801 
802 		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
803 			u32 start = adreno_gpu->registers[i];
804 			u32 end   = adreno_gpu->registers[i + 1];
805 			u32 addr;
806 
807 			for (addr = start; addr <= end; addr++) {
808 				state->registers[pos++] = addr;
809 				state->registers[pos++] = gpu_read(gpu, addr);
810 			}
811 		}
812 
813 		state->nr_registers = count;
814 	}
815 
816 	return 0;
817 }
818 
819 void adreno_gpu_state_destroy(struct msm_gpu_state *state)
820 {
821 	int i;
822 
823 	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
824 		kvfree(state->ring[i].data);
825 
826 	for (i = 0; state->bos && i < state->nr_bos; i++)
827 		kvfree(state->bos[i].data);
828 
829 	kfree(state->vm_logs);
830 	kfree(state->bos);
831 	kfree(state->comm);
832 	kfree(state->cmd);
833 	kfree(state->registers);
834 }
835 
836 static void adreno_gpu_state_kref_destroy(struct kref *kref)
837 {
838 	struct msm_gpu_state *state = container_of(kref,
839 		struct msm_gpu_state, ref);
840 
841 	adreno_gpu_state_destroy(state);
842 	kfree(state);
843 }
844 
845 int adreno_gpu_state_put(struct msm_gpu_state *state)
846 {
847 	if (IS_ERR_OR_NULL(state))
848 		return 1;
849 
850 	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
851 }
852 
853 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
854 
855 static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
856 {
857 	void *buf;
858 	size_t buf_itr = 0, buffer_size;
859 	char out[ASCII85_BUFSZ];
860 	long l;
861 	int i;
862 
863 	if (!src || !len)
864 		return NULL;
865 
866 	l = ascii85_encode_len(len);
867 
868 	/*
869 	 * Ascii85 encodes each 4-byte group as either 5 chars or 1 char, so
870 	 * account for the worst case of 5 bytes per group plus 1 for the '\0'
871 	 */
872 	buffer_size = (l * 5) + 1;
873 
874 	buf = kvmalloc(buffer_size, GFP_KERNEL);
875 	if (!buf)
876 		return NULL;
877 
878 	for (i = 0; i < l; i++)
879 		buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
880 				ascii85_encode(src[i], out));
881 
882 	return buf;
883 }
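
/*
 * Sizing example: for len = 16 bytes, ascii85_encode_len() yields l = 4
 * groups, so buffer_size = 4 * 5 + 1 = 21, enough for the worst case of
 * 5 characters per group plus the terminating '\0'.
 */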
884 
885 /* len is expected to be in bytes
886  *
887  * WARNING: *ptr should be allocated with kvmalloc or friends.  It can be free'd
888  * with kvfree() and replaced with a newly kvmalloc'd buffer on the first call
889  * when the unencoded raw data is encoded
890  */
891 void adreno_show_object(struct drm_printer *p, void **ptr, int len,
892 		bool *encoded)
893 {
894 	if (!*ptr || !len)
895 		return;
896 
897 	if (!*encoded) {
898 		long datalen, i;
899 		u32 *buf = *ptr;
900 
901 		/*
902 		 * Only dump the non-zero part of the buffer - rarely will
903 		 * any data completely fill the entire allocated size of
904 		 * the buffer.
905 		 */
906 		for (datalen = 0, i = 0; i < len >> 2; i++)
907 			if (buf[i])
908 				datalen = ((i + 1) << 2);
909 
910 		/*
911 		 * If we reach here, then the originally captured binary buffer
912 		 * will be replaced with the ascii85 encoded string
913 		 */
914 		*ptr = adreno_gpu_ascii85_encode(buf, datalen);
915 
916 		kvfree(buf);
917 
918 		*encoded = true;
919 	}
920 
921 	if (!*ptr)
922 		return;
923 
924 	drm_puts(p, "    data: !!ascii85 |\n");
925 	drm_puts(p, "     ");
926 
927 	drm_puts(p, *ptr);
928 
929 	drm_puts(p, "\n");
930 }
931 
932 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
933 		struct drm_printer *p)
934 {
935 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
936 	int i;
937 
938 	if (IS_ERR_OR_NULL(state))
939 		return;
940 
941 	drm_printf(p, "revision: %u (%"ADRENO_CHIPID_FMT")\n",
942 			adreno_gpu->info->revn,
943 			ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
944 	/*
945 	 * If this state was collected due to an iova fault, print the
946 	 * fault related info.  A non-zero TTBR0 is a good way to
947 	 * distinguish that case.
948 	 */
949 	if (state->fault_info.ttbr0) {
950 		const struct msm_gpu_fault_info *info = &state->fault_info;
951 
952 		drm_puts(p, "fault-info:\n");
953 		drm_printf(p, "  - ttbr0=%.16llx\n", info->ttbr0);
954 		drm_printf(p, "  - iova=%.16lx\n", info->iova);
955 		drm_printf(p, "  - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
956 		drm_printf(p, "  - type=%s\n", info->type);
957 		drm_printf(p, "  - source=%s\n", info->block);
958 
959 		/* Information extracted from what we think are the current
960 		 * pgtables.  Hopefully the TTBR0 matches what we've extracted
961 		 * from the SMMU registers in smmu_info!
962 		 */
963 		drm_puts(p, "pgtable-fault-info:\n");
964 		drm_printf(p, "  - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
965 		drm_printf(p, "  - asid: %d\n", info->asid);
966 		drm_printf(p, "  - ptes: %.16llx %.16llx %.16llx %.16llx\n",
967 			   info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
968 	}
969 
970 	if (state->vm_logs) {
971 		drm_puts(p, "vm-log:\n");
972 		for (i = 0; i < state->nr_vm_logs; i++) {
973 			struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
974 			drm_printf(p, "  - %s:%d: 0x%016llx-0x%016llx\n",
975 				   e->op, e->queue_id, e->iova,
976 				   e->iova + e->range);
977 		}
978 	}
979 
980 	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
981 
982 	drm_puts(p, "ringbuffer:\n");
983 
984 	for (i = 0; i < gpu->nr_rings; i++) {
985 		drm_printf(p, "  - id: %d\n", i);
986 		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
987 		drm_printf(p, "    last-fence: %u\n", state->ring[i].seqno);
988 		drm_printf(p, "    retired-fence: %u\n", state->ring[i].fence);
989 		drm_printf(p, "    rptr: %u\n", state->ring[i].rptr);
990 		drm_printf(p, "    wptr: %u\n", state->ring[i].wptr);
991 		drm_printf(p, "    size: %u\n", MSM_GPU_RINGBUFFER_SZ);
992 
993 		adreno_show_object(p, &state->ring[i].data,
994 			state->ring[i].data_size, &state->ring[i].encoded);
995 	}
996 
997 	if (state->bos) {
998 		drm_puts(p, "bos:\n");
999 
1000 		for (i = 0; i < state->nr_bos; i++) {
1001 			drm_printf(p, "  - iova: 0x%016llx\n",
1002 				state->bos[i].iova);
1003 			drm_printf(p, "    size: %zd\n", state->bos[i].size);
1004 			drm_printf(p, "    flags: 0x%x\n", state->bos[i].flags);
1005 			drm_printf(p, "    name: %-32s\n", state->bos[i].name);
1006 
1007 			adreno_show_object(p, &state->bos[i].data,
1008 				state->bos[i].size, &state->bos[i].encoded);
1009 		}
1010 	}
1011 
1012 	if (state->nr_registers) {
1013 		drm_puts(p, "registers:\n");
1014 
1015 		for (i = 0; i < state->nr_registers; i++) {
1016 			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
1017 				state->registers[i * 2] << 2,
1018 				state->registers[(i * 2) + 1]);
1019 		}
1020 	}
1021 }
1022 #endif
1023 
1024 /* Dump common gpu status and scratch registers on any hang, to make
1025  * the hangcheck logs more useful.  The scratch registers seem always
1026  * safe to read when the GPU has hung (unlike some other regs, depending
1027  * on how the GPU hung), and they are useful to match up to cmdstream
1028  * dumps when debugging hangs:
1029  */
1030 void adreno_dump_info(struct msm_gpu *gpu)
1031 {
1032 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1033 	int i;
1034 
1035 	printk("revision: %u (%"ADRENO_CHIPID_FMT")\n",
1036 			adreno_gpu->info->revn,
1037 			ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
1038 
1039 	for (i = 0; i < gpu->nr_rings; i++) {
1040 		struct msm_ringbuffer *ring = gpu->rb[i];
1041 
1042 		printk("rb %d: fence:    %d/%d\n", i,
1043 			ring->memptrs->fence,
1044 			ring->fctx->last_fence);
1045 
1046 		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
1047 		printk("rb wptr:  %d\n", get_wptr(ring));
1048 	}
1049 }
1050 
1051 /* would be nice to not have to duplicate the _show() stuff with printk(): */
1052 void adreno_dump(struct msm_gpu *gpu)
1053 {
1054 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1055 	int i;
1056 
1057 	if (!adreno_gpu->registers)
1058 		return;
1059 
1060 	/* dump these out in a form that can be parsed by demsm: */
1061 	printk("IO:region %s 00000000 00020000\n", gpu->name);
1062 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
1063 		uint32_t start = adreno_gpu->registers[i];
1064 		uint32_t end   = adreno_gpu->registers[i+1];
1065 		uint32_t addr;
1066 
1067 		for (addr = start; addr <= end; addr++) {
1068 			uint32_t val = gpu_read(gpu, addr);
1069 			printk("IO:R %08x %08x\n", addr<<2, val);
1070 		}
1071 	}
1072 }
1073 
1074 static uint32_t ring_freewords(struct msm_ringbuffer *ring)
1075 {
1076 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
1077 	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
1078 	/* Use ring->next to calculate free size */
1079 	uint32_t wptr = ring->next - ring->start;
1080 	uint32_t rptr = get_rptr(adreno_gpu, ring);
1081 	return (rptr + (size - 1) - wptr) % size;
1082 }
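
/*
 * Worked example: for size = 4096 dwords, rptr = 100 and wptr = 90, this
 * returns (100 + 4095 - 90) % 4096 = 9; one dword less than the raw gap,
 * so wptr can never advance right up to rptr and make a full ring look
 * empty.
 */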
1083 
1084 void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
1085 {
1086 	if (spin_until(ring_freewords(ring) >= ndwords))
1087 		DRM_DEV_ERROR(ring->gpu->dev->dev,
1088 			"timeout waiting for space in ringbuffer %d\n",
1089 			ring->id);
1090 }
1091 
1092 static int adreno_get_pwrlevels(struct device *dev,
1093 		struct msm_gpu *gpu)
1094 {
1095 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1096 	unsigned long freq = ULONG_MAX;
1097 	struct dev_pm_opp *opp;
1098 	int ret;
1099 
1100 	gpu->fast_rate = 0;
1101 
1102 	/* devm_pm_opp_of_add_table may error out but will still create an OPP table */
1103 	ret = devm_pm_opp_of_add_table(dev);
1104 	if (ret == -ENODEV) {
1105 		/* Special cases for ancient hw with ancient DT bindings */
1106 		if (adreno_is_a2xx(adreno_gpu)) {
1107 			dev_warn(dev, "Unable to find the OPP table. Falling back to 200 MHz.\n");
1108 			dev_pm_opp_add(dev, 200000000, 0);
1109 		} else if (adreno_is_a320(adreno_gpu)) {
1110 			dev_warn(dev, "Unable to find the OPP table. Falling back to 450 MHz.\n");
1111 			dev_pm_opp_add(dev, 450000000, 0);
1112 		} else {
1113 			DRM_DEV_ERROR(dev, "Unable to find the OPP table\n");
1114 			return -ENODEV;
1115 		}
1116 	} else if (ret) {
1117 		DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
1118 		return ret;
1119 	}
1120 
1121 	/* Find the fastest defined rate */
1122 	opp = dev_pm_opp_find_freq_floor(dev, &freq);
1123 	if (IS_ERR(opp))
1124 		return PTR_ERR(opp);
1125 
1126 	gpu->fast_rate = freq;
1127 	dev_pm_opp_put(opp);
1128 
1129 	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
1130 
1131 	return 0;
1132 }
1133 
1134 int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
1135 			  struct adreno_ocmem *adreno_ocmem)
1136 {
1137 	struct ocmem_buf *ocmem_hdl;
1138 	struct ocmem *ocmem;
1139 
1140 	ocmem = of_get_ocmem(dev);
1141 	if (IS_ERR(ocmem)) {
1142 		if (PTR_ERR(ocmem) == -ENODEV) {
1143 			/*
1144 			 * Return success since either the ocmem property was
1145 			 * not specified in device tree, or ocmem support is
1146 			 * not compiled into the kernel.
1147 			 */
1148 			return 0;
1149 		}
1150 
1151 		return PTR_ERR(ocmem);
1152 	}
1153 
1154 	ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem);
1155 	if (IS_ERR(ocmem_hdl))
1156 		return PTR_ERR(ocmem_hdl);
1157 
1158 	adreno_ocmem->ocmem = ocmem;
1159 	adreno_ocmem->base = ocmem_hdl->addr;
1160 	adreno_ocmem->hdl = ocmem_hdl;
1161 
1162 	if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem))
1163 		return -ENOMEM;
1164 
1165 	return 0;
1166 }
1167 
1168 void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
1169 {
1170 	if (adreno_ocmem && adreno_ocmem->base)
1171 		ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
1172 			   adreno_ocmem->hdl);
1173 }
1174 
1175 int adreno_read_speedbin(struct device *dev, u32 *speedbin)
1176 {
1177 	return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin);
1178 }
1179 
1180 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
1181 		struct adreno_gpu *adreno_gpu,
1182 		const struct adreno_gpu_funcs *funcs, int nr_rings)
1183 {
1184 	struct device *dev = &pdev->dev;
1185 	struct adreno_platform_config *config = dev->platform_data;
1186 	struct msm_gpu_config adreno_gpu_config = { 0 };
1187 	struct msm_gpu *gpu = &adreno_gpu->base;
1188 	const char *gpu_name;
1189 	int ret;
1190 
1191 	adreno_gpu->funcs = funcs;
1192 	adreno_gpu->info = config->info;
1193 	adreno_gpu->chip_id = config->chip_id;
1194 
1195 	gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
1196 	gpu->pdev = pdev;
1197 
1198 	/* Only handle the core clock when GMU is not in use (or is absent). */
1199 	if (adreno_has_gmu_wrapper(adreno_gpu) ||
1200 	    adreno_has_rgmu(adreno_gpu) ||
1201 	    adreno_gpu->info->family < ADRENO_6XX_GEN1) {
1202 		/*
1203 		 * This can only be done before devm_pm_opp_of_add_table(), or
1204 		 * dev_pm_opp_set_config() will WARN_ON()
1205 		 */
1206 		if (IS_ERR(devm_clk_get(dev, "core"))) {
1207 			/*
1208 			 * If "core" is absent, go for the legacy clock name.
1209 			 * If we got this far in probing, it's a given one of
1210 			 * them exists.
1211 			 */
1212 			devm_pm_opp_set_clkname(dev, "core_clk");
1213 		} else
1214 			devm_pm_opp_set_clkname(dev, "core");
1215 	}
1216 
1217 	gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%"ADRENO_CHIPID_FMT,
1218 			ADRENO_CHIPID_ARGS(config->chip_id));
1219 	if (!gpu_name)
1220 		return -ENOMEM;
1221 
1222 	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
1223 
1224 	adreno_gpu_config.nr_rings = nr_rings;
1225 
1226 	ret = adreno_get_pwrlevels(dev, gpu);
1227 	if (ret)
1228 		return ret;
1229 
1230 	init_completion(&adreno_gpu->fault_coredump_done);
1231 	complete_all(&adreno_gpu->fault_coredump_done);
1232 
1233 	pm_runtime_set_autosuspend_delay(dev,
1234 		adreno_gpu->info->inactive_period);
1235 	pm_runtime_use_autosuspend(dev);
1236 
1237 	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
1238 			gpu_name, &adreno_gpu_config);
1239 }
1240 
1241 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
1242 {
1243 	struct msm_gpu *gpu = &adreno_gpu->base;
1244 	struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
1245 	unsigned int i;
1246 
1247 	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
1248 		release_firmware(adreno_gpu->fw[i]);
1249 
1250 	if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
1251 		pm_runtime_disable(&priv->gpu_pdev->dev);
1252 
1253 	msm_gpu_cleanup(&adreno_gpu->base);
1254 }
1255