xref: /linux/drivers/gpu/drm/msm/adreno/adreno_gpu.c (revision 9009b455811b0fa1f6b0adfa94db136984db5a38)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 */

#include <linux/ascii85.h>
#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "a6xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

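/* Cleared once we learn this target cannot (or need not) use a zap shader */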
static bool zap_available = true;

static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
		u32 pasid)
{
	struct device *dev = &gpu->pdev->dev;
	const struct firmware *fw;
	const char *signed_fwname = NULL;
	struct device_node *np, *mem_np;
	struct resource r;
	phys_addr_t mem_phys;
	ssize_t mem_size;
	void *mem_region = NULL;
	int ret;

	if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
		zap_available = false;
		return -EINVAL;
	}

	np = of_get_child_by_name(dev->of_node, "zap-shader");
	if (!np) {
		zap_available = false;
		return -ENODEV;
	}

	mem_np = of_parse_phandle(np, "memory-region", 0);

	/*
	 * Check for a firmware-name property.  This is the new scheme
	 * to handle firmware that may be signed with device specific
	 * keys, allowing us to have a different zap fw path for different
	 * devices.
	 *
	 * If the firmware-name property is found, we bypass the
	 * adreno_request_fw() mechanism, because we don't need to handle
	 * the /lib/firmware/qcom/... vs /lib/firmware/... case.
	 *
	 * If the firmware-name property is not found, for backwards
	 * compatibility we fall back to the fwname from the gpulist
	 * table.
	 */
	of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
	of_node_put(np);

	if (!mem_np) {
		zap_available = false;
		return -EINVAL;
	}

	ret = of_address_to_resource(mem_np, 0, &r);
	of_node_put(mem_np);
	if (ret)
		return ret;

	mem_phys = r.start;

	if (signed_fwname) {
		fwname = signed_fwname;
		ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
		if (ret)
			fw = ERR_PTR(ret);
	} else if (fwname) {
		/* Request the MDT file from the default location: */
		fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
	} else {
		/*
		 * For new targets, we require the firmware-name property,
		 * if a zap-shader is required, rather than falling back
		 * to a firmware name specified in gpulist.
		 *
		 * Because the firmware is signed with a (potentially)
		 * device specific key, having the name come from gpulist
		 * was a bad idea, and is only provided for backwards
		 * compatibility for older targets.
		 */
		return -ENODEV;
	}

	if (IS_ERR(fw)) {
		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
		return PTR_ERR(fw);
	}

	/* Figure out how much memory we need */
	mem_size = qcom_mdt_get_size(fw);
	if (mem_size < 0) {
		ret = mem_size;
		goto out;
	}

	if (mem_size > resource_size(&r)) {
		DRM_DEV_ERROR(dev,
			"memory region is too small to load the MDT\n");
		ret = -E2BIG;
		goto out;
	}

	/* Allocate memory for the firmware image */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Load the rest of the MDT
	 *
	 * Note that we could be dealing with two different paths, since
	 * with upstream linux-firmware it would be in a qcom/ subdir..
	 * adreno_request_fw() handles this, but qcom_mdt_load() does
	 * not.  But since we've already gotten through adreno_request_fw()
	 * we know which of the two cases it is:
	 */
	if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
		ret = qcom_mdt_load(dev, fw, fwname, pasid,
				mem_region, mem_phys, mem_size, NULL);
	} else {
		char *newname;

		newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
		if (!newname) {
			ret = -ENOMEM;
			goto out;
		}

		ret = qcom_mdt_load(dev, fw, newname, pasid,
				mem_region, mem_phys, mem_size, NULL);
		kfree(newname);
	}
	if (ret)
		goto out;

	/* Send the image to the secure world */
	ret = qcom_scm_pas_auth_and_reset(pasid);

	/*
	 * If the scm call returns -EOPNOTSUPP we assume that this target
	 * doesn't need/support the zap shader so quietly fail
	 */
	if (ret == -EOPNOTSUPP)
		zap_available = false;
	else if (ret)
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
	if (mem_region)
		memunmap(mem_region);

	release_firmware(fw);

	return ret;
}

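/*
 * adreno_zap_shader_load() - load and authenticate the zap shader
 * @gpu: the GPU to bring out of secure mode
 * @pasid: peripheral authentication service id for the zap image
 *
 * The zap shader is a small firmware blob, authenticated by the secure
 * world, that is run once to take the GPU out of secure mode.  Returns
 * -ENODEV once we have determined that no zap shader is available or
 * needed for this target.
 */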
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct platform_device *pdev = gpu->pdev;

	/* Short cut if we determine the zap shader isn't available/needed */
	if (!zap_available)
		return -ENODEV;

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}

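/*
 * Ask the IOMMU to map GPU pagetable walks as outer write-back
 * write-allocate, so the walks can allocate into the system cache (LLC)
 * on targets that have one.
 */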
void adreno_set_llc_attributes(struct iommu_domain *iommu)
{
	struct io_pgtable_domain_attr pgtbl_cfg;

	pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
	iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
}

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev)
{
	struct iommu_domain *iommu;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	u64 start, size;

	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	mmu = msm_iommu_new(&pdev->dev, iommu);
	if (IS_ERR(mmu)) {
		iommu_domain_free(iommu);
		return ERR_CAST(mmu);
	}

	/*
	 * Use the aperture start or SZ_16M, whichever is greater. This will
	 * ensure that we align with the allocated pagetable range while still
	 * allowing room in the lower 32 bits for GMEM and whatnot
	 */
	start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
	size = iommu->geometry.aperture_end - start + 1;

	aspace = msm_gem_address_space_create(mmu, "gpu",
		start & GENMASK_ULL(48, 0), size);

	if (IS_ERR(aspace))
		mmu->funcs->destroy(mmu);

	return aspace;
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = adreno_is_a650(adreno_gpu) ? 0 : 0x100000;
		return 0;
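	/*
	 * The chip id packs one revision field per byte, core id in the
	 * top byte; an a630 (rev 6.3.0.0), for example, reads back as
	 * 0x06030000.
	 */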
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	case MSM_PARAM_PP_PGTABLE:
		*value = 0;
		return 0;
	case MSM_PARAM_FAULTS:
		*value = gpu->global_faults;
		return 0;
	case MSM_PARAM_SUSPENDS:
		*value = gpu->suspend_count;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}

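/*
 * Userspace queries these params through DRM_IOCTL_MSM_GET_PARAM; a
 * minimal sketch (field layout per include/uapi/drm/msm_drm.h):
 *
 *	struct drm_msm_param req = {
 *		.pipe  = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_CHIP_ID,
 *	};
 *
 *	if (!ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req))
 *		printf("chip id: %#llx\n", (unsigned long long)req.value);
 */

/*
 * Firmware is searched for in up to three locations, in order: the new
 * qcom/ subdirectory used by upstream linux-firmware, the legacy
 * unprefixed path, and finally the usermode helper.  The first location
 * that works is latched in adreno_gpu->fwloc so later requests go
 * straight there.
 */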
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct drm_device *drm = adreno_gpu->base.dev;
	const struct firmware *fw = NULL;
	char *newname;
	int ret;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try first to load from qcom/$fwfile using a direct load (to avoid
	 * a potential timeout waiting for usermode helper)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {

		ret = request_firmware_direct(&fw, newname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_NEW;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Then try the legacy location without qcom/ prefix
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {

		ret = request_firmware_direct(&fw, fwname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
				fwname);
			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				fwname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Finally fall back to request_firmware() for cases where the
	 * usermode helper is needed (I think mainly android)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {

		ret = request_firmware(&fw, newname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_HELPER;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
	fw = ERR_PTR(-ENOENT);
out:
	kfree(newname);
	return fw;
}

int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
		const struct firmware *fw;

		if (!adreno_gpu->info->fw[i])
			continue;

		/* Skip if the firmware has already been loaded */
		if (adreno_gpu->fw[i])
			continue;

		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
		if (IS_ERR(fw))
			return PTR_ERR(fw);

		adreno_gpu->fw[i] = fw;
	}

	return 0;
}

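/*
 * Upload a firmware image into a GPU-readable (and GPU-read-only) BO.
 * The leading dword of the image is skipped; it is a header word consumed
 * by the driver rather than part of the GPU payload.
 */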
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);

	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);

	return bo;
}

int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret, i;

	DBG("%s", gpu->name);

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return ret;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (!ring)
			continue;

		ring->cur = ring->start;
		ring->next = ring->start;

		/* reset completed fence seqno: */
		ring->memptrs->fence = ring->fctx->completed_fence;
		ring->memptrs->rptr = 0;
	}

	return 0;
}

/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	return gpu->funcs->get_rptr(gpu, ring);
}

struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	return gpu->rb[0];
}

void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	// XXX pm-runtime??  we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	ret = msm_gpu_hw_init(gpu);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}

void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
{
	uint32_t wptr;

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/*
	 * Mask wptr value that we calculate to fit in the HW range. This is
	 * to account for the possibility that the last command fit exactly into
	 * the ringbuffer and rb->next hasn't wrapped to zero yet
	 */
	wptr = get_wptr(ring);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, reg, wptr);
}

bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(ring);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

	return false;
}

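/*
 * Capture a snapshot of the GPU state for debugfs/devcoredump: per-ring
 * bookkeeping and (trimmed) ringbuffer contents, plus a register dump for
 * targets that provide a register list.
 */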
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i, count = 0;

	kref_init(&state->ref);

	ktime_get_real_ts64(&state->time);

	for (i = 0; i < gpu->nr_rings; i++) {
		int size = 0, j;

		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
		state->ring[i].iova = gpu->rb[i]->iova;
		state->ring[i].seqno = gpu->rb[i]->seqno;
		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
		state->ring[i].wptr = get_wptr(gpu->rb[i]);

		/* Copy at least 'wptr' dwords of the data */
		size = state->ring[i].wptr;

		/* After wptr find the last non zero dword to save space */
		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
			if (gpu->rb[i]->start[j])
				size = j + 1;

		if (size) {
			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
			if (state->ring[i].data) {
				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
				state->ring[i].data_size = size << 2;
			}
		}
	}

	/* Some targets prefer to collect their own registers */
	if (!adreno_gpu->registers)
		return 0;

	/* Count the number of registers */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
		count += adreno_gpu->registers[i + 1] -
			adreno_gpu->registers[i] + 1;

	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
	if (state->registers) {
		int pos = 0;

		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
			u32 start = adreno_gpu->registers[i];
			u32 end   = adreno_gpu->registers[i + 1];
			u32 addr;

			for (addr = start; addr <= end; addr++) {
				state->registers[pos++] = addr;
				state->registers[pos++] = gpu_read(gpu, addr);
			}
		}

		state->nr_registers = count;
	}

	return 0;
}

void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
		kvfree(state->ring[i].data);

	for (i = 0; state->bos && i < state->nr_bos; i++)
		kvfree(state->bos[i].data);

	kfree(state->bos);
	kfree(state->comm);
	kfree(state->cmd);
	kfree(state->registers);
}

static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);

	adreno_gpu_state_destroy(state);
	kfree(state);
}

int adreno_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}

#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)

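/*
 * Buffer contents in the state dump are ascii85 encoded so the dump stays
 * printable text while remaining reasonably compact.
 */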
static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
	void *buf;
	size_t buf_itr = 0, buffer_size;
	char out[ASCII85_BUFSZ];
	long l;
	int i;

	if (!src || !len)
		return NULL;

	l = ascii85_encode_len(len);

	/*
	 * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
	 * account for the worst case of 5 bytes per dword plus the 1 for '\0'
	 */
	buffer_size = (l * 5) + 1;

	buf = kvmalloc(buffer_size, GFP_KERNEL);
	if (!buf)
		return NULL;

	for (i = 0; i < l; i++)
		buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
				ascii85_encode(src[i], out));

	return buf;
}

/* len is expected to be in bytes */
static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded)
{
	if (!*ptr || !len)
		return;

	if (!*encoded) {
		long datalen, i;
		u32 *buf = *ptr;

		/*
		 * Only dump the non-zero part of the buffer - rarely will
		 * any data completely fill the entire allocated size of
		 * the buffer.
		 */
		for (datalen = 0, i = 0; i < len >> 2; i++)
			if (buf[i])
				datalen = ((i + 1) << 2);

		/*
		 * If we reach here, then the originally captured binary buffer
		 * will be replaced with the ascii85 encoded string
		 */
		*ptr = adreno_gpu_ascii85_encode(buf, datalen);

		kvfree(buf);

		*encoded = true;
	}

	if (!*ptr)
		return;

	drm_puts(p, "    data: !!ascii85 |\n");
	drm_puts(p, "     ");

	drm_puts(p, *ptr);

	drm_puts(p, "\n");
}

void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (IS_ERR_OR_NULL(state))
		return;

	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);

	drm_puts(p, "ringbuffer:\n");

	for (i = 0; i < gpu->nr_rings; i++) {
		drm_printf(p, "  - id: %d\n", i);
		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);

		adreno_show_object(p, &state->ring[i].data,
			state->ring[i].data_size, &state->ring[i].encoded);
	}

	if (state->bos) {
		drm_puts(p, "bos:\n");

		for (i = 0; i < state->nr_bos; i++) {
			drm_printf(p, "  - iova: 0x%016llx\n",
				state->bos[i].iova);
			drm_printf(p, "    size: %zd\n", state->bos[i].size);

			adreno_show_object(p, &state->bos[i].data,
				state->bos[i].size, &state->bos[i].encoded);
		}
	}

	if (state->nr_registers) {
		drm_puts(p, "registers:\n");

		for (i = 0; i < state->nr_registers; i++) {
			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
				state->registers[i * 2] << 2,
				state->registers[(i * 2) + 1]);
		}
	}
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		printk("rb %d: fence:    %d/%d\n", i,
			ring->memptrs->fence,
			ring->seqno);

		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
		printk("rb wptr:  %d\n", get_wptr(ring));
	}
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (!adreno_gpu->registers)
		return;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i + 1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr << 2, val);
		}
	}
}

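/*
 * One ringbuffer slot is always left unused so that an empty ring
 * (rptr == wptr) can be told apart from a full one; e.g. with rptr equal
 * to wptr the formula below reports size - 1 free words, never size.
 */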
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next to calculate free size */
	uint32_t wptr = ring->next - ring->start;
	uint32_t rptr = get_rptr(adreno_gpu, ring);
	return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}

/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
	struct device_node *child, *node;
	int ret;

	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
	if (!node) {
		DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
		return -ENXIO;
	}

	for_each_child_of_node(node, child) {
		unsigned int val;

		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
		if (ret)
			continue;

		/*
		 * Skip the intentionally bogus clock value found at the bottom
		 * of most legacy frequency tables
		 */
		if (val != 27000000)
			dev_pm_opp_add(dev, val, 0);
	}

	of_node_put(node);

	return 0;
}

static void adreno_get_pwrlevels(struct device *dev,
		struct msm_gpu *gpu)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;
	int ret;

	gpu->fast_rate = 0;

	/* You down with OPP? */
	if (!of_find_property(dev->of_node, "operating-points-v2", NULL)) {
		ret = adreno_get_legacy_pwrlevels(dev);
	} else {
		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
	}

	if (!ret) {
		/* Find the fastest defined rate */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (!IS_ERR(opp)) {
			gpu->fast_rate = freq;
			dev_pm_opp_put(opp);
		}
	}

	if (!gpu->fast_rate) {
		dev_warn(dev,
			"Could not find a clock rate. Using a reasonable default\n");
		/* Pick a suitably safe clock speed for any target */
		gpu->fast_rate = 200000000;
	}

	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
}

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *adreno_ocmem)
{
	struct ocmem_buf *ocmem_hdl;
	struct ocmem *ocmem;

	ocmem = of_get_ocmem(dev);
	if (IS_ERR(ocmem)) {
		if (PTR_ERR(ocmem) == -ENODEV) {
			/*
			 * Return success since either the ocmem property was
			 * not specified in device tree, or ocmem support is
			 * not compiled into the kernel.
			 */
			return 0;
		}

		return PTR_ERR(ocmem);
	}

	ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
	if (IS_ERR(ocmem_hdl))
		return PTR_ERR(ocmem_hdl);

	adreno_ocmem->ocmem = ocmem;
	adreno_ocmem->base = ocmem_hdl->addr;
	adreno_ocmem->hdl = ocmem_hdl;
	adreno_gpu->gmem = ocmem_hdl->len;

	return 0;
}

void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
{
	if (adreno_ocmem && adreno_ocmem->base)
		ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
			   adreno_ocmem->hdl);
}

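/*
 * Common init for all adreno generations: look up the matching entry in
 * the gpulist table, set up clocks/OPPs and runtime PM, then hand off to
 * the generic msm_gpu layer.
 */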
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs, int nr_rings)
{
	struct device *dev = &pdev->dev;
	struct adreno_platform_config *config = dev->platform_data;
	struct msm_gpu_config adreno_gpu_config = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";

	adreno_gpu_config.nr_rings = nr_rings;

	adreno_get_pwrlevels(dev, gpu);

	pm_runtime_set_autosuspend_delay(dev,
		adreno_gpu->info->inactive_period);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
}

void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
		release_firmware(adreno_gpu->fw[i]);

	pm_runtime_disable(&priv->gpu_pdev->dev);

	msm_gpu_cleanup(&adreno_gpu->base);

	icc_put(gpu->icc_path);
	icc_put(gpu->ocmem_icc_path);
}
956