Lines Matching "+full:tcs +full:- +full:wait" in drivers/gpu/drm/msm/adreno/a6xx_gmu.c

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
11 #include <soc/qcom/cmd-db.h>
12 #include <soc/qcom/tcs.h>
24 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fault()
25 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_fault()
28 gmu->hung = true; in a6xx_gmu_fault()
31 timer_delete(&gpu->hangcheck_timer); in a6xx_gmu_fault()
34 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_gmu_fault()
46 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
52 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
55 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
70 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
83 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
97 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_gx_is_on()
101 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
124 const struct a6xx_info *info = adreno_gpu->info->a6xx; in a6xx_gmu_set_freq()
126 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq()
134 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
137 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
138 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
142 if (info->bcms && gmu->nr_gpu_bws > 1) { in a6xx_gmu_set_freq()
145 for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) { in a6xx_gmu_set_freq()
146 if (bw == gmu->gpu_bw_table[bw_index]) in a6xx_gmu_set_freq()
165 do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]); in a6xx_gmu_set_freq()
172 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
173 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
175 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
186 if (!gmu->legacy) { in a6xx_gmu_set_freq()
190 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
211 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
213 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
220 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq()
222 return gmu->freq; in a6xx_gmu_get_freq()
228 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
231 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
237 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
245 /* Wait for the GMU to get to its most idle state */
254 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_start()
284 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
286 set_bit(GMU_STATUS_FW_START, &gmu->status); in a6xx_gmu_start()
301 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
357 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
360 return -EINVAL; in a6xx_gmu_set_oob()
362 if (gmu->legacy) { in a6xx_gmu_set_oob()
369 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
370 "Invalid non-legacy GMU request %s\n", in a6xx_gmu_set_oob()
372 return -EINVAL; in a6xx_gmu_set_oob()
379 /* Wait for the acknowledge interrupt */ in a6xx_gmu_set_oob()
384 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
400 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
405 if (gmu->legacy) in a6xx_gmu_clear_oob()
419 WARN_ON(!gmu->legacy); in a6xx_sptprac_enable()
422 if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE) in a6xx_sptprac_enable()
431 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
444 if (!gmu->legacy) in a6xx_sptprac_disable()
456 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
469 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
481 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gemnoc_workaround()
501 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
504 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
519 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
520 ret = -ETIMEDOUT; in a6xx_gmu_notify_slumber()
537 if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status)) in a6xx_rpmh_start()
545 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
553 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
567 if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status)) in a6xx_rpmh_stop()
575 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
579 set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status); in a6xx_rpmh_stop()
593 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_init()
594 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
667 /* Set TCS commands used by PDC sequence for low power modes */ in a6xx_gmu_rpmh_init()
718 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
727 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_power_config()
740 switch (gmu->idle_level) { in a6xx_gmu_power_config()
776 if (!in_range(blk->addr, bo->iova, bo->size)) in fw_block_mem()
779 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); in fw_block_mem()
784 ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
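
/*
 * A minimal sketch of the block layout implied by the iterator macro above
 * and the blk->addr / blk->size / blk->data accesses below: the GMU firmware
 * image is parsed as a sequence of length-prefixed blocks. The exact
 * definition is not part of this listing; field names follow the accesses
 * shown here.
 */
struct block_header {
	u32 addr;	/* destination address (ITCM, DTCM or a cache IOVA) */
	u32 size;	/* payload size in bytes; the next block starts right after it */
	u32 data[];	/* payload copied out in a6xx_gmu_fw_load() below */
};
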
789 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_load()
790 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; in a6xx_gmu_fw_load()
801 if (gmu->legacy) { in a6xx_gmu_fw_load()
803 if (fw_image->size > 0x8000) { in a6xx_gmu_fw_load()
804 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
806 return -EINVAL; in a6xx_gmu_fw_load()
810 (u32*) fw_image->data, fw_image->size); in a6xx_gmu_fw_load()
815 for (blk = (const struct block_header *) fw_image->data; in a6xx_gmu_fw_load()
816 (const u8*) blk < fw_image->data + fw_image->size; in a6xx_gmu_fw_load()
818 if (blk->size == 0) in a6xx_gmu_fw_load()
821 if (in_range(blk->addr, itcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
822 reg_offset = (blk->addr - itcm_base) >> 2; in a6xx_gmu_fw_load()
825 blk->data, blk->size); in a6xx_gmu_fw_load()
826 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
827 reg_offset = (blk->addr - dtcm_base) >> 2; in a6xx_gmu_fw_load()
830 blk->data, blk->size); in a6xx_gmu_fw_load()
831 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
832 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
833 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
834 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
836 blk->addr, blk->size, blk->data[0]); in a6xx_gmu_fw_load()
852 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_start()
853 const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx; in a6xx_gmu_fw_start()
875 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], in a6xx_gmu_fw_start()
877 return -ENOENT; in a6xx_gmu_fw_start()
889 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
911 if (a6xx_info->gmu_chipid) { in a6xx_gmu_fw_start()
912 chipid = a6xx_info->gmu_chipid; in a6xx_gmu_fw_start()
921 chipid = adreno_gpu->chip_id & 0xffff0000; in a6xx_gmu_fw_start()
922 chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */ in a6xx_gmu_fw_start()
923 chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */ in a6xx_gmu_fw_start()
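
/*
 * Worked example, assuming the usual adreno chip_id packing of one byte each
 * for core, major, minor and patchid (e.g. 0x06030001 = core 6, major 3,
 * minor 0, patch 1): the masks above keep core/major in bits [31:16] and move
 * the low nibbles of minor and patchid into bits [15:12] and [11:8], so the
 * value handed to the GMU would be 0x06030100.
 */
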
929 (gmu->log.iova & GENMASK(31, 12)) | in a6xx_gmu_fw_start()
930 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); in a6xx_gmu_fw_start()
935 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
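
/*
 * Example of the size encoding above: with the 16 KiB log buffer allocated in
 * a6xx_gmu_init() further down, the low bits carry SZ_16K / SZ_4K - 1 = 3
 * ("number of 4 KiB pages minus one"), while the page-aligned IOVA provides
 * the upper bits.
 */
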
945 if (gmu->legacy) { in a6xx_gmu_fw_start()
975 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
976 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
985 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_off()
1023 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_force_off()
1024 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_force_off()
1030 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
1067 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
1069 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_freq()
1073 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
1081 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
1083 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_bw()
1087 dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); in a6xx_gmu_set_initial_bw()
1093 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_resume()
1094 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_resume()
1095 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume()
1098 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
1099 return -EINVAL; in a6xx_gmu_resume()
1101 gmu->hung = false; in a6xx_gmu_resume()
1104 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
1111 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
1112 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
1115 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
1116 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? in a6xx_gmu_resume()
1118 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
1120 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1121 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1131 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1137 } else if (gmu->legacy) { in a6xx_gmu_resume()
1162 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1167 if (refcount_read(&gpu->sysprof_active) > 1) { in a6xx_gmu_resume()
1170 set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status); in a6xx_gmu_resume()
1175 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1177 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1178 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1188 if (!gmu->initialized) in a6xx_gmu_isidle()
1203 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_shutdown()
1212 if (adreno_gpu->base.needs_hw_init) { in a6xx_gmu_shutdown()
1213 if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) in a6xx_gmu_shutdown()
1216 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in a6xx_gmu_shutdown()
1219 if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) in a6xx_gmu_shutdown()
1228 a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); in a6xx_gmu_shutdown()
1246 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1271 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop()
1272 struct msm_gpu *gpu = &a6xx_gpu->base.base; in a6xx_gmu_stop()
1274 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1281 if (gmu->hung) in a6xx_gmu_stop()
1287 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_gmu_stop()
1294 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1295 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1297 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1299 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1306 struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu; in a6xx_gmu_memory_free()
1308 msm_gem_kernel_put(gmu->hfi.obj, gmu->vm); in a6xx_gmu_memory_free()
1309 msm_gem_kernel_put(gmu->debug.obj, gmu->vm); in a6xx_gmu_memory_free()
1310 msm_gem_kernel_put(gmu->icache.obj, gmu->vm); in a6xx_gmu_memory_free()
1311 msm_gem_kernel_put(gmu->dcache.obj, gmu->vm); in a6xx_gmu_memory_free()
1312 msm_gem_kernel_put(gmu->dummy.obj, gmu->vm); in a6xx_gmu_memory_free()
1313 msm_gem_kernel_put(gmu->log.obj, gmu->vm); in a6xx_gmu_memory_free()
1315 mmu->funcs->detach(mmu); in a6xx_gmu_memory_free()
1316 drm_gpuvm_put(gmu->vm); in a6xx_gmu_memory_free()
1323 struct drm_device *dev = a6xx_gpu->base.base.dev; in a6xx_gmu_memory_alloc()
1330 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1341 bo->obj = msm_gem_new(dev, size, flags); in a6xx_gmu_memory_alloc()
1342 if (IS_ERR(bo->obj)) in a6xx_gmu_memory_alloc()
1343 return PTR_ERR(bo->obj); in a6xx_gmu_memory_alloc()
1345 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->vm, &bo->iova, in a6xx_gmu_memory_alloc()
1348 drm_gem_object_put(bo->obj); in a6xx_gmu_memory_alloc()
1352 bo->virt = msm_gem_get_vaddr(bo->obj); in a6xx_gmu_memory_alloc()
1353 bo->size = size; in a6xx_gmu_memory_alloc()
1355 msm_gem_object_set_name(bo->obj, "%s", name); in a6xx_gmu_memory_alloc()
1364 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1368 gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true); in a6xx_gmu_memory_probe()
1369 if (IS_ERR(gmu->vm)) in a6xx_gmu_memory_probe()
1370 return PTR_ERR(gmu->vm); in a6xx_gmu_memory_probe()
1376 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
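
/*
 * Sketch of the BCM auxiliary data read back from cmd-db, as implied by the
 * le32_to_cpu(->unit), le16_to_cpu(->width) and ->vcd accesses in
 * a6xx_gmu_rpmh_bw_votes_init() below; the trailing padding byte is an
 * assumption, and the full kernel-doc is not part of this listing.
 */
struct bcm_db {
	__le32 unit;	/* divisor turning a bytes/sec bandwidth into an RPMh vote */
	__le16 width;	/* multiplier applied to the requested bandwidth */
	u8 vcd;		/* virtual clock domain this BCM belongs to */
	u8 reserved;
};
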
1396 /* Retrieve BCM data from cmd-db */ in a6xx_gmu_rpmh_bw_votes_init()
1398 const struct a6xx_bcm *bcm = &info->bcms[bcm_index]; in a6xx_gmu_rpmh_bw_votes_init()
1402 if (!bcm->name) in a6xx_gmu_rpmh_bw_votes_init()
1405 bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count); in a6xx_gmu_rpmh_bw_votes_init()
1410 dev_err(gmu->dev, "invalid BCM '%s' aux data size\n", in a6xx_gmu_rpmh_bw_votes_init()
1411 bcm->name); in a6xx_gmu_rpmh_bw_votes_init()
1412 return -EINVAL; in a6xx_gmu_rpmh_bw_votes_init()
1419 for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) { in a6xx_gmu_rpmh_bw_votes_init()
1420 u32 *data = gmu->gpu_ib_votes[bw_index]; in a6xx_gmu_rpmh_bw_votes_init()
1421 u32 bw = gmu->gpu_bw_table[bw_index]; in a6xx_gmu_rpmh_bw_votes_init()
1425 const struct a6xx_bcm *bcm = &info->bcms[bcm_index]; in a6xx_gmu_rpmh_bw_votes_init()
1430 if (bcm_index == bcm_count - 1 || in a6xx_gmu_rpmh_bw_votes_init()
1432 bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd)) in a6xx_gmu_rpmh_bw_votes_init()
1440 if (bcm->fixed) { in a6xx_gmu_rpmh_bw_votes_init()
1445 (bcm->perfmode_bw && bw >= bcm->perfmode_bw)) in a6xx_gmu_rpmh_bw_votes_init()
1446 perfmode = bcm->perfmode; in a6xx_gmu_rpmh_bw_votes_init()
1453 peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width); in a6xx_gmu_rpmh_bw_votes_init()
1454 do_div(peak, bcm->buswidth); in a6xx_gmu_rpmh_bw_votes_init()
1458 do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit)); in a6xx_gmu_rpmh_bw_votes_init()
1473 /* Return the 'arc-level' for the given frequency */
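
/*
 * A minimal sketch of what the helper under the comment above has to do,
 * assuming (as the OPP lookups elsewhere in this listing suggest) that the
 * RPMh voltage level is stored as the opp-level of the matching OPP entry.
 * This is an illustration, not the exact function body.
 */
static u32 example_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	u32 level;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	level = dev_pm_opp_get_level(opp);
	dev_pm_opp_put(opp);

	return level;
}
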
1510 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1517 if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER)) in a6xx_gmu_rpmh_arc_votes_init()
1524 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1547 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1583 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_votes_init()
1584 const struct a6xx_info *info = adreno_gpu->info->a6xx; in a6xx_gmu_rpmh_votes_init()
1585 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_rpmh_votes_init()
1589 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1590 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1593 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1594 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1597 if (info->bcms && gmu->nr_gpu_bws > 1) in a6xx_gmu_rpmh_votes_init()
1618 count = size - 1; in a6xx_gmu_build_freq_table()
1650 count = size - 1; in a6xx_gmu_build_bw_table()
1670 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_pwrlevels_probe()
1671 const struct a6xx_info *info = adreno_gpu->info->a6xx; in a6xx_gmu_pwrlevels_probe()
1672 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_pwrlevels_probe()
1680 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1682 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1686 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1687 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1693 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1694 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1696 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1702 if (info->bcms) in a6xx_gmu_pwrlevels_probe()
1703 gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1704 gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table)); in a6xx_gmu_pwrlevels_probe()
1713 struct a6xx_hfi_acd_table *cmd = &gmu->acd_table; in a6xx_gmu_acd_probe()
1714 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_acd_probe()
1715 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_acd_probe()
1721 DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n"); in a6xx_gmu_acd_probe()
1725 cmd->version = 1; in a6xx_gmu_acd_probe()
1726 cmd->stride = 1; in a6xx_gmu_acd_probe()
1727 cmd->enable_by_level = 0; in a6xx_gmu_acd_probe()
1729 /* Skip freq = 0 and parse acd-level for rest of the OPPs */ in a6xx_gmu_acd_probe()
1730 for (i = 1; i < gmu->nr_gpu_freqs; i++) { in a6xx_gmu_acd_probe()
1736 freq = gmu->gpu_freqs[i]; in a6xx_gmu_acd_probe()
1738 opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true); in a6xx_gmu_acd_probe()
1741 ret = of_property_read_u32(np, "qcom,opp-acd-level", &val); in a6xx_gmu_acd_probe()
1744 if (ret == -EINVAL) in a6xx_gmu_acd_probe()
1747 DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq); in a6xx_gmu_acd_probe()
1751 cmd->enable_by_level |= BIT(i); in a6xx_gmu_acd_probe()
1752 cmd->data[cmd_idx++] = val; in a6xx_gmu_acd_probe()
1755 cmd->num_levels = cmd_idx; in a6xx_gmu_acd_probe()
1758 if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) { in a6xx_gmu_acd_probe()
1759 DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n"); in a6xx_gmu_acd_probe()
1760 return -EINVAL; in a6xx_gmu_acd_probe()
1764 if (IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_acd_probe()
1771 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level); in a6xx_gmu_acd_probe()
1772 if (ret && cmd->enable_by_level) { in a6xx_gmu_acd_probe()
1773 DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n"); in a6xx_gmu_acd_probe()
1782 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1787 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1789 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1790 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1792 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1793 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1806 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); in a6xx_gmu_get_mmio()
1807 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1810 ret = ioremap(res->start, resource_size(res)); in a6xx_gmu_get_mmio()
1812 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); in a6xx_gmu_get_mmio()
1813 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1828 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", in a6xx_gmu_get_irq()
1840 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_sysprof_setup()
1844 if (!pm_runtime_get_if_active(&gpu->pdev->dev)) in a6xx_gmu_sysprof_setup()
1847 mutex_lock(&gmu->lock); in a6xx_gmu_sysprof_setup()
1849 sysprof_active = refcount_read(&gpu->sysprof_active); in a6xx_gmu_sysprof_setup()
1855 if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) in a6xx_gmu_sysprof_setup()
1857 else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status)) in a6xx_gmu_sysprof_setup()
1860 mutex_unlock(&gmu->lock); in a6xx_gmu_sysprof_setup()
1862 pm_runtime_put(&gpu->pdev->dev); in a6xx_gmu_sysprof_setup()
1867 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_remove()
1868 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove()
1869 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1871 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1872 if (!gmu->initialized) { in a6xx_gmu_remove()
1873 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1877 gmu->initialized = false; in a6xx_gmu_remove()
1879 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1881 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1884 * Since cxpd is a virt device, the devlink with gmu-dev will be removed in a6xx_gmu_remove()
1887 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1889 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1890 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1891 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1894 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_remove()
1895 qmp_put(gmu->qmp); in a6xx_gmu_remove()
1897 iounmap(gmu->mmio); in a6xx_gmu_remove()
1899 iounmap(gmu->rscc); in a6xx_gmu_remove()
1900 gmu->mmio = NULL; in a6xx_gmu_remove()
1901 gmu->rscc = NULL; in a6xx_gmu_remove()
1906 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1907 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1911 put_device(gmu->dev); in a6xx_gmu_remove()
1920 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
1928 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init()
1932 return -ENODEV; in a6xx_gmu_wrapper_init()
1934 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1936 ret = of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1940 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1943 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1946 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1947 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1948 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1952 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1953 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1954 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1958 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1959 ret = -ENODEV; in a6xx_gmu_wrapper_init()
1963 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1964 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1965 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1968 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1969 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1970 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1974 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1979 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1982 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1985 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1992 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_init()
1993 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init()
1999 return -ENODEV; in a6xx_gmu_init()
2001 gmu->dev = &pdev->dev; in a6xx_gmu_init()
2003 ret = of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
2008 gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ? in a6xx_gmu_init()
2011 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
2018 ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu); in a6xx_gmu_init()
2029 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
2032 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
2037 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
2041 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
2049 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
2050 SZ_16M - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
2054 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition in a6xx_gmu_init()
2060 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
2061 SZ_256K - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
2065 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
2066 SZ_256K - SZ_16K, 0x44000, "dcache"); in a6xx_gmu_init()
2071 gmu->legacy = true; in a6xx_gmu_init()
2074 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
2080 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
2085 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
2090 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
2091 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
2092 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
2098 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
2099 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
2100 ret = -ENODEV; in a6xx_gmu_init()
2104 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
2108 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
2109 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
2111 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
2112 ret = -ENODEV; in a6xx_gmu_init()
2116 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
2117 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
2118 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
2122 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); in a6xx_gmu_init()
2124 ret = -ENODEV; in a6xx_gmu_init()
2129 gmu->qmp = qmp_get(gmu->dev); in a6xx_gmu_init()
2130 if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) { in a6xx_gmu_init()
2131 ret = -EPROBE_DEFER; in a6xx_gmu_init()
2135 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
2136 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
2137 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
2143 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
2158 gmu->initialized = true; in a6xx_gmu_init()
2163 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_init()
2164 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_init()
2166 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_init()
2167 qmp_put(gmu->qmp); in a6xx_gmu_init()
2172 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
2175 iounmap(gmu->mmio); in a6xx_gmu_init()
2177 iounmap(gmu->rscc); in a6xx_gmu_init()
2178 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
2179 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
2185 put_device(gmu->dev); in a6xx_gmu_init()