Lines matching "mmu", "-", "500", "s" (free-text search) in drivers/gpu/drm/msm/adreno/adreno_gpu.c

file scope:
     1  // SPDX-License-Identifier: GPL-2.0-only
    17  #include <linux/nvmem-consumer.h>

zap_shader_load_mdt():
    33  struct device *dev = &gpu->pdev->dev;
    45  return -EINVAL;
    48  np = of_get_child_by_name(dev->of_node, "zap-shader");
    51  return -ENODEV;
    62  * Check for a firmware-name property. This is the new scheme
    67  * If the firmware-name property is found, we bypass the
    71  * If the firmware-name property is not found, for backwards
    75  of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
    78  ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
    86  * For new targets, we require the firmware-name property,
    87  * if a zap-shader is required, rather than falling back
    95  return -ENOENT;
    99  DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
   113  ret = -E2BIG;
   120  ret = -ENOMEM;
   128  * with upstream linux-firmware it would be in a qcom/ subdir..
   133  if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
   139  newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
   152  * If the scm call returns -EOPNOTSUPP we assume that this target
   155  if (ret == -EOPNOTSUPP)
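
A hedged sketch of the name selection these fragments imply: a firmware-name from the zap-shader DT node, or a name found via the legacy firmware location, is handed to the MDT loader as-is; otherwise the image is expected under the qcom/ subdir used by upstream linux-firmware. The helper name below is hypothetical, not part of the driver.

/* Illustrative only: which path string goes to the MDT loader. */
static char *zap_mdt_path(const char *fwname, bool signed_fwname,
			  bool legacy_fwloc)
{
	if (signed_fwname || legacy_fwloc)
		return kstrdup(fwname, GFP_KERNEL);	/* use name as-is */
	return kasprintf(GFP_KERNEL, "qcom/%s", fwname); /* linux-firmware layout */
}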

adreno_zap_shader_load():
   172  struct platform_device *pdev = gpu->pdev;
   176  return -ENODEV;
   180  DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
   181  return -EPROBE_DEFER;
   184  return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);

adreno_iommu_create_vm():
   200  struct msm_mmu *mmu;
   204  mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
   205  if (IS_ERR(mmu))
   206  return ERR_CAST(mmu);
   208  geometry = msm_iommu_get_geometry(mmu);
   217  start = max_t(u64, SZ_16M, geometry->aperture_start);
   218  size = geometry->aperture_end - start + 1;
   220  vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
   223  if (IS_ERR(vm) && !IS_ERR(mmu))
   224  mmu->funcs->destroy(mmu);
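
Worked numbers for the aperture math above, assuming an illustrative 48-bit SMMU input range of [0, BIT_ULL(48) - 1] (not quoted from any specific target):

/* Assumed geometry: aperture_start = 0, aperture_end = BIT_ULL(48) - 1 */
u64 start = max_t(u64, SZ_16M, 0);		/* 0x0000000001000000      */
u64 size  = (BIT_ULL(48) - 1) - start + 1;	/* 256 TiB minus 16 MiB    */
/* start & GENMASK_ULL(48, 0) keeps bits [48:0], clamping the base to a
 * 49-bit address before msm_gem_vm_create() is called. */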

adreno_private_vm_size():
   232  struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
   238  if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
   241  if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
   244  ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
   252  return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
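
A worked example of the return value, assuming ias == 48 and taking ADRENO_VM_START to be the 4 GiB boundary where the per-process VA region begins (my reading; the define itself is not in the excerpt):

/* BIT(48) = 256 TiB of input address range; everything below
 * ADRENO_VM_START (assumed SZ_4G here) is reserved, so the private VM
 * would span 256 TiB - 4 GiB. */
u64 private_vm_size = BIT_ULL(48) - SZ_4G;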

adreno_check_and_reenable_stall():
   257  struct msm_gpu *gpu = &adreno_gpu->base;
   258  struct msm_drm_private *priv = gpu->dev->dev_private;
   263  * collect a crashdump to re-enable stall-on-fault.
   265  spin_lock_irqsave(&priv->fault_stall_lock, flags);
   266  if (!priv->stall_enabled &&
   267  ktime_after(ktime_get(), priv->stall_reenable_time) &&
   268  !READ_ONCE(gpu->crashstate)) {
   269  struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
   271  priv->stall_enabled = true;
   273  mmu->funcs->set_stall(mmu, true);
   275  spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
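
Condensed sketch of the backoff this function implements together with adreno_fault_handler() below: a fault disables stalling and arms a 500 ms deadline, after which stalling may be re-enabled. The helper names are hypothetical and the fault_stall_lock locking is omitted for brevity.

/* Hypothetical helpers sketching the 500 ms stall-on-fault backoff. */
static void stall_backoff_on_fault(struct msm_drm_private *priv)
{
	priv->stall_enabled = false;			/* stop stalling */
	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
}

static bool stall_may_reenable(struct msm_drm_private *priv)
{
	return !priv->stall_enabled &&
	       ktime_after(ktime_get(), priv->stall_reenable_time);
}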

adreno_fault_handler():
   287  struct msm_drm_private *priv = gpu->dev->dev_private;
   288  struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
   290  bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
   291  !READ_ONCE(gpu->crashstate);
   296  * stall-on-fault for at least half a second.
   298  spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
   299  if (priv->stall_enabled) {
   300  priv->stall_enabled = false;
   302  mmu->funcs->set_stall(mmu, false);
   305  priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
   306  spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
   310  * adreno-smmu-priv
   320  if (info->fsr & ARM_SMMU_FSR_TF)
   322  else if (info->fsr & ARM_SMMU_FSR_PF)
   324  else if (info->fsr & ARM_SMMU_FSR_EF)
   327  pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
   328  info->ttbr0, iova,
   337  timer_delete(&gpu->hangcheck_timer);
   339  fault_info.ttbr0 = info->ttbr0;
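
The if/else chain at lines 320-324 maps SMMU fault-status bits to a readable type string for the ratelimited warning. Condensed into one helper here; the helper itself is illustrative, though the bit-to-string mapping follows the driver as I read it:

/* Illustrative helper: decode the SMMU fault status register bits. */
static const char *fsr_type(u32 fsr)
{
	if (fsr & ARM_SMMU_FSR_TF)
		return "TRANSLATION";
	else if (fsr & ARM_SMMU_FSR_PF)
		return "PERMISSION";
	else if (fsr & ARM_SMMU_FSR_EF)
		return "EXTERNAL";
	return "UNKNOWN";
}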

adreno_get_param():
   355  struct drm_device *drm = gpu->dev;
   365  *value = adreno_gpu->info->revn;
   368  *value = adreno_gpu->info->gmem;
   378  *value = adreno_gpu->chip_id;
   379  if (!adreno_gpu->info->revn)
   380  *value |= ((uint64_t) adreno_gpu->speedbin) << 32;
   383  *value = adreno_gpu->base.fast_rate;
   386  if (adreno_gpu->funcs->get_timestamp) {
   389  pm_runtime_get_sync(&gpu->pdev->dev);
   390  ret = adreno_gpu->funcs->get_timestamp(gpu, value);
   391  pm_runtime_put_autosuspend(&gpu->pdev->dev);
   395  return -EINVAL;
   397  *value = gpu->nr_rings * NR_SCHED_PRIORITIES;
   404  *value = gpu->global_faults + to_msm_vm(vm)->faults;
   406  *value = gpu->global_faults;
   409  *value = gpu->suspend_count;
   412  if (vm == gpu->vm)
   413  return UERR(EINVAL, drm, "requires per-process pgtables");
   414  *value = vm->mm_start;
   417  if (vm == gpu->vm)
   418  return UERR(EINVAL, drm, "requires per-process pgtables");
   419  *value = vm->mm_range;
   422  *value = adreno_gpu->ubwc_config->highest_bank_bit;
   425  *value = adreno_gpu->has_ray_tracing;
   428  *value = adreno_gpu->ubwc_config->ubwc_swizzle;
   431  *value = adreno_gpu->ubwc_config->macrotile_mode;
   434  *value = adreno_gpu->uche_trap_base;
   440  return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
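
For targets without a legacy revn, lines 378-380 pack the speedbin into the upper 32 bits of the 64-bit chip-id param, so userspace would unpack it roughly like this (hypothetical userspace snippet, not from the driver):

uint32_t chip_id  = (uint32_t)value;		/* low 32 bits             */
uint16_t speedbin = (uint16_t)(value >> 32);	/* set only when revn == 0 */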

adreno_set_param():
   447  struct drm_device *drm = gpu->dev;
   472  mutex_lock(&gpu->lock);
   475  paramp = &ctx->comm;
   477  paramp = &ctx->cmdline;
   483  mutex_unlock(&gpu->lock);
   492  /* We can only support VM_BIND with per-process pgtables: */
   493  if (ctx->vm == gpu->vm)
   494  return UERR(EINVAL, drm, "requires per-process pgtables");
   500  if (ctx->vm)
   503  ctx->userspace_managed_vm = value;
   507  return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);

adreno_request_fw():
   514  struct drm_device *drm = adreno_gpu->base.dev;
   519  newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
   521  return ERR_PTR(-ENOMEM);
   527  if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
   528  (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
   530  ret = request_firmware_direct(&fw, newname, drm->dev);
   532  DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
   534  adreno_gpu->fwloc = FW_LOCATION_NEW;
   536  } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
   537  DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
   547  if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
   548  (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
   550  ret = request_firmware_direct(&fw, fwname, drm->dev);
   552  DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
   554  adreno_gpu->fwloc = FW_LOCATION_LEGACY;
   556  } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
   557  DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
   568  if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
   569  (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
   571  ret = request_firmware(&fw, newname, drm->dev);
   573  DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
   575  adreno_gpu->fwloc = FW_LOCATION_HELPER;
   577  } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
   578  DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
   585  DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
   586  fw = ERR_PTR(-ENOENT);
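
The three if-blocks above implement a lookup order that is latched in adreno_gpu->fwloc on first success, so later loads go straight to the location that worked. Summarized (my paraphrase of the control flow):

/*
 *  1. request_firmware_direct("qcom/<name>")  -> FW_LOCATION_NEW
 *  2. request_firmware_direct("<name>")       -> FW_LOCATION_LEGACY
 *  3. request_firmware("qcom/<name>")         -> FW_LOCATION_HELPER
 *     (the non-direct call may invoke the userspace fallback helper)
 */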

adreno_load_fw():
   596  for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
   599  if (!adreno_gpu->info->fw[i])
   607  if (adreno_gpu->fw[i])
   610  fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
   614  adreno_gpu->fw[i] = fw;

adreno_fw_create_bo():
   626  ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
   627  MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
   632  memcpy(ptr, &fw->data[4], fw->size - 4);

adreno_hw_init():
   644  VERB("%s", gpu->name);
   646  if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 &&
   651  DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret);
   654  for (int i = 0; i < gpu->nr_rings; i++) {
   655  struct msm_ringbuffer *ring = gpu->rb[i];
   660  ring->cur = ring->start;
   661  ring->next = ring->start;
   662  ring->memptrs->rptr = 0;
   663  ring->memptrs->bv_fence = ring->fctx->completed_fence;
   669  if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
   670  ring->memptrs->fence = ring->fctx->last_fence;

get_rptr():
   681  struct msm_gpu *gpu = &adreno_gpu->base;
   683  return gpu->funcs->get_rptr(gpu, ring);

adreno_active_ring():
   688  return gpu->rb[0];

adreno_recover():
   693  struct drm_device *dev = gpu->dev;
   696  // XXX pm-runtime??  we *need* the device to be off after this
   697  // so maybe continuing to call ->pm_suspend/resume() is better?
   699  gpu->funcs->pm_suspend(gpu);
   700  gpu->funcs->pm_resume(gpu);
   704  DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);

adreno_flush():
   714  ring->cur = ring->next;
   719  * the ringbuffer and rb->next hasn't wrapped to zero yet

adreno_idle():
   739  DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
   740  gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

adreno_gpu_state_get():
   750  WARN_ON(!mutex_is_locked(&gpu->lock));
   752  kref_init(&state->ref);
   754  ktime_get_real_ts64(&state->time);
   756  for (i = 0; i < gpu->nr_rings; i++) {
   759  state->ring[i].fence = gpu->rb[i]->memptrs->fence;
   760  state->ring[i].iova = gpu->rb[i]->iova;
   761  state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
   762  state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
   763  state->ring[i].wptr = get_wptr(gpu->rb[i]);
   766  size = state->ring[i].wptr;
   769  for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
   770  if (gpu->rb[i]->start[j])
   774  state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
   775  if (state->ring[i].data)
   776  state->ring[i].data_size = size << 2;
   781  if (!adreno_gpu->registers)
   785  for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
   786  count += adreno_gpu->registers[i + 1] -
   787  adreno_gpu->registers[i] + 1;
   789  state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
   790  if (state->registers) {
   793  for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
   794  u32 start = adreno_gpu->registers[i];
   795  u32 end = adreno_gpu->registers[i + 1];
   799  state->registers[pos++] = addr;
   800  state->registers[pos++] = gpu_read(gpu, addr);
   804  state->nr_registers = count;
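
The registers table is a list of (start, end) pairs terminated by ~0, and the snapshot expands each range into (offset, value) pairs. The inner loop elided from the excerpt presumably looks like:

for (addr = start; addr <= end; addr++) {
	state->registers[pos++] = addr;			/* register offset */
	state->registers[pos++] = gpu_read(gpu, addr);	/* sampled value   */
}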

adreno_gpu_state_destroy():
   814  for (i = 0; i < ARRAY_SIZE(state->ring); i++)
   815  kvfree(state->ring[i].data);
   817  for (i = 0; state->bos && i < state->nr_bos; i++)
   818  kvfree(state->bos[i].data);
   820  kfree(state->vm_logs);
   821  kfree(state->bos);
   822  kfree(state->comm);
   823  kfree(state->cmd);
   824  kfree(state->registers);

adreno_gpu_state_put():
   841  return kref_put(&state->ref, adreno_gpu_state_kref_destroy);

adreno_gpu_ascii85_encode():
   870  buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",

adreno_show_object():
   893  * Only dump the non-zero part of the buffer - rarely will

adreno_show():
   933  adreno_gpu->info->revn,
   934  ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
   940  if (state->fault_info.ttbr0) {
   941  const struct msm_gpu_fault_info *info = &state->fault_info;
   943  drm_puts(p, "fault-info:\n");
   944  drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0);
   945  drm_printf(p, " - iova=%.16lx\n", info->iova);
   946  drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
   947  drm_printf(p, " - type=%s\n", info->type);
   948  drm_printf(p, " - source=%s\n", info->block);
   954  drm_puts(p, "pgtable-fault-info:\n");
   955  drm_printf(p, " - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
   956  drm_printf(p, " - asid: %d\n", info->asid);
   957  drm_printf(p, " - ptes: %.16llx %.16llx %.16llx %.16llx\n",
   958  info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
   961  if (state->vm_logs) {
   962  drm_puts(p, "vm-log:\n");
   963  for (i = 0; i < state->nr_vm_logs; i++) {
   964  struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
   965  drm_printf(p, " - %s:%d: 0x%016llx-0x%016llx\n",
   966  e->op, e->queue_id, e->iova,
   967  e->iova + e->range);
   971  drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
   975  for (i = 0; i < gpu->nr_rings; i++) {
   976  drm_printf(p, " - id: %d\n", i);
   977  drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
   978  drm_printf(p, " last-fence: %u\n", state->ring[i].seqno);
   979  drm_printf(p, " retired-fence: %u\n", state->ring[i].fence);
   980  drm_printf(p, " rptr: %u\n", state->ring[i].rptr);
   981  drm_printf(p, " wptr: %u\n", state->ring[i].wptr);
   984  adreno_show_object(p, &state->ring[i].data,
   985  state->ring[i].data_size, &state->ring[i].encoded);
   988  if (state->bos) {
   991  for (i = 0; i < state->nr_bos; i++) {
   992  drm_printf(p, " - iova: 0x%016llx\n",
   993  state->bos[i].iova);
   994  drm_printf(p, " size: %zd\n", state->bos[i].size);
   995  drm_printf(p, " flags: 0x%x\n", state->bos[i].flags);
   996  drm_printf(p, " name: %-32s\n", state->bos[i].name);
   998  adreno_show_object(p, &state->bos[i].data,
   999  state->bos[i].size, &state->bos[i].encoded);
  1003  if (state->nr_registers) {
  1006  for (i = 0; i < state->nr_registers; i++) {
  1007  drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
  1008  state->registers[i * 2] << 2,
  1009  state->registers[(i * 2) + 1]);

adreno_dump_info():
  1027  adreno_gpu->info->revn,
  1028  ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
  1030  for (i = 0; i < gpu->nr_rings; i++) {
  1031  struct msm_ringbuffer *ring = gpu->rb[i];
  1034  ring->memptrs->fence,
  1035  ring->fctx->last_fence);

adreno_dump():
  1048  if (!adreno_gpu->registers)
  1052  printk("IO:region %s 00000000 00020000\n", gpu->name);
  1053  for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
  1054  uint32_t start = adreno_gpu->registers[i];
  1055  uint32_t end = adreno_gpu->registers[i+1];

ring_freewords():
  1067  struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
  1069  /* Use ring->next to calculate free size */
  1070  uint32_t wptr = ring->next - ring->start;
  1072  return (rptr + (size - 1) - wptr) % size;
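
Worked example of the free-space formula above, with size = 8 words; one word is always kept unused so a full ring stays distinguishable from an empty one:

/*  rptr == wptr        ->  (r + 7 - r) % 8 = 7 free words (empty ring)
 *  rptr = 5, wptr = 2  ->  (5 + 7 - 2) % 8 = 2 free words
 *  rptr = 3, wptr = 2  ->  (3 + 7 - 2) % 8 = 0 free words (ring full)  */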

adreno_wait_ring():
  1078  DRM_DEV_ERROR(ring->gpu->dev->dev,
  1080  ring->id);

adreno_get_pwrlevels():
  1091  gpu->fast_rate = 0;
  1095  if (ret == -ENODEV) {
  1105  return -ENODEV;
  1117  gpu->fast_rate = freq;
  1120  DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

adreno_gpu_ocmem_init():
  1133  if (PTR_ERR(ocmem) == -ENODEV) {
  1145  ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem);
  1149  adreno_ocmem->ocmem = ocmem;
  1150  adreno_ocmem->base = ocmem_hdl->addr;
  1151  adreno_ocmem->hdl = ocmem_hdl;
  1153  if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem))
  1154  return -ENOMEM;

adreno_gpu_ocmem_cleanup():
  1161  if (adreno_ocmem && adreno_ocmem->base)
  1162  ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
  1163  adreno_ocmem->hdl);

adreno_gpu_init():
  1175  struct device *dev = &pdev->dev;
  1176  struct adreno_platform_config *config = dev->platform_data;
  1178  struct msm_gpu *gpu = &adreno_gpu->base;
  1183  adreno_gpu->funcs = funcs;
  1184  adreno_gpu->info = config->info;
  1185  adreno_gpu->chip_id = config->chip_id;
  1187  gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
  1188  gpu->pdev = pdev;
  1192  adreno_gpu->info->family < ADRENO_6XX_GEN1) {
  1200  * If we got this far in probing, it's a given one of
  1210  adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);
  1213  ADRENO_CHIPID_ARGS(config->chip_id));
  1215  return -ENOMEM;
  1226  adreno_gpu->info->inactive_period);
  1229  return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,

adreno_gpu_cleanup():
  1235  struct msm_gpu *gpu = &adreno_gpu->base;
  1236  struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
  1239  for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
  1240  release_firmware(adreno_gpu->fw[i]);
  1242  if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
  1243  pm_runtime_disable(&priv->gpu_pdev->dev);
  1245  msm_gpu_cleanup(&adreno_gpu->base);