Lines matching "rpmh-based" in drivers/gpu/drm/msm/adreno/a6xx_gmu.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
11 #include <soc/qcom/cmd-db.h>
23 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fault()
24 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_fault()
27 gmu->hung = true; in a6xx_gmu_fault()
30 del_timer(&gpu->hangcheck_timer); in a6xx_gmu_fault()
33 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_gmu_fault()
45 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
51 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
54 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
69 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
82 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
98 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
113 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq()
120 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
123 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
124 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
127 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
128 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
130 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
141 if (!gmu->legacy) { in a6xx_gmu_set_freq()
143 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
164 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
166 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_gmu_set_freq()
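Editorial aside: the perf-index scan above (the loop body's `break` on a match is elided by the listing) deliberately stops at nr_gpu_freqs - 1, so a frequency that is not in the table falls through to the highest perf level. A minimal standalone sketch of that behavior, with hypothetical names and assuming a non-empty, ascending table:

    #include <stddef.h>

    static size_t perf_index_for(const unsigned long *freqs, size_t nr,
                                 unsigned long gpu_freq)
    {
            size_t i;

            /* stop at nr - 1 so a miss selects the top entry (nr >= 1 assumed) */
            for (i = 0; i < nr - 1; i++)
                    if (freqs[i] == gpu_freq)
                            break;

            return i;
    }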
173 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq()
175 return gmu->freq; in a6xx_gmu_get_freq()
181 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
184 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
190 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
207 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_start()
237 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
252 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
308 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
311 return -EINVAL; in a6xx_gmu_set_oob()
313 if (gmu->legacy) { in a6xx_gmu_set_oob()
320 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
321 "Invalid non-legacy GMU request %s\n", in a6xx_gmu_set_oob()
323 return -EINVAL; in a6xx_gmu_set_oob()
335 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
351 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
356 if (gmu->legacy) in a6xx_gmu_clear_oob()
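Editorial aside: a6xx_gmu_set_oob()/a6xx_gmu_clear_oob() implement a doorbell-and-ack handshake with the GMU firmware. A kernel-style sketch of the core of it; the register names are quoted from memory of the a6xx register headers, and the request/ack bit positions differ between legacy and current firmware, so treat this as illustrative:

    static int oob_handshake(struct a6xx_gmu *gmu, u32 request, u32 ack)
    {
            u32 val;
            int ret;

            /* ring the doorbell for this request bit */
            gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, BIT(request));

            /* poll up to 10 ms for the matching ack bit */
            ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                                   val & BIT(ack), 100, 10000);

            /* retire the ack so the next request starts clean */
            gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, BIT(ack));

            return ret;
    }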
370 if (!gmu->legacy) in a6xx_sptprac_enable()
379 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
392 if (!gmu->legacy) in a6xx_sptprac_disable()
404 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
417 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
429 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gemnoc_workaround()
449 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
452 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
467 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
468 ret = -ETIMEDOUT; in a6xx_gmu_notify_slumber()
490 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
498 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
517 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
533 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_init()
534 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
660 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
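Editorial aside: the arithmetic behind the "(.5 us)" above: 0xa is 10 ticks, and 0.5 us over 10 ticks implies a ~20 MHz tick, consistent with the 19.2 MHz always-on XO clock (10 / 19.2 MHz ≈ 0.52 us). The tick source is an assumption here, not stated by the file.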
669 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_power_config()
682 switch (gmu->idle_level) { in a6xx_gmu_power_config()
698 /* Enable RPMh GPU client */ in a6xx_gmu_power_config()
718 if (!in_range(blk->addr, bo->iova, bo->size)) in fw_block_mem()
721 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); in fw_block_mem()
728 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_load()
729 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; in a6xx_gmu_fw_load()
739 if (gmu->legacy) { in a6xx_gmu_fw_load()
741 if (fw_image->size > 0x8000) { in a6xx_gmu_fw_load()
742 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
744 return -EINVAL; in a6xx_gmu_fw_load()
748 (u32*) fw_image->data, fw_image->size); in a6xx_gmu_fw_load()
753 for (blk = (const struct block_header *) fw_image->data; in a6xx_gmu_fw_load()
754 (const u8*) blk < fw_image->data + fw_image->size; in a6xx_gmu_fw_load()
755 blk = (const struct block_header *) &blk->data[blk->size >> 2]) { in a6xx_gmu_fw_load()
756 if (blk->size == 0) in a6xx_gmu_fw_load()
759 if (in_range(blk->addr, itcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
760 reg_offset = (blk->addr - itcm_base) >> 2; in a6xx_gmu_fw_load()
763 blk->data, blk->size); in a6xx_gmu_fw_load()
764 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { in a6xx_gmu_fw_load()
765 reg_offset = (blk->addr - dtcm_base) >> 2; in a6xx_gmu_fw_load()
768 blk->data, blk->size); in a6xx_gmu_fw_load()
769 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
770 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
771 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
772 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
774 blk->addr, blk->size, blk->data[0]); in a6xx_gmu_fw_load()
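Editorial aside: the loader above walks self-describing [header | payload] records and copies any block that lands inside a preallocated BO's IOVA range. A standalone sketch of both pieces, with stand-in types (the driver's real struct block_header and BO type live elsewhere in the file); payload sizes are assumed word-aligned, which is what the `blk->size >> 2` step relies on:

    #include <stdint.h>
    #include <string.h>

    struct block_header {
            uint32_t addr;   /* load address in GMU space */
            uint32_t size;   /* payload size in bytes, word-aligned */
            uint32_t data[]; /* payload: size >> 2 u32 words */
    };

    struct gmu_bo {          /* stand-in: IOVA range plus CPU mapping */
            uint64_t iova;
            size_t size;
            uint8_t *virt;
    };

    static int in_range(uint64_t addr, uint64_t start, size_t len)
    {
            return addr >= start && addr < start + len;
    }

    /* mirrors fw_block_mem(): copy the block if it targets this BO */
    static int fw_block_mem(struct gmu_bo *bo, const struct block_header *blk)
    {
            if (!in_range(blk->addr, bo->iova, bo->size))
                    return 0;

            memcpy(bo->virt + (blk->addr - bo->iova), blk->data, blk->size);
            return 1;
    }

    /* the next record starts right after the payload, which is what
     * &blk->data[blk->size >> 2] computes in the loop above */
    static void walk_blocks(const uint8_t *image, size_t image_size,
                            struct gmu_bo *bo)
    {
            const struct block_header *blk;

            for (blk = (const struct block_header *)image;
                 (const uint8_t *)blk < image + image_size;
                 blk = (const struct block_header *)&blk->data[blk->size >> 2]) {
                    if (blk->size == 0)
                            continue;  /* header-only record */
                    fw_block_mem(bo, blk);
            }
    }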
784 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_fw_start()
785 const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx; in a6xx_gmu_fw_start()
807 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], in a6xx_gmu_fw_start()
809 return -ENOENT; in a6xx_gmu_fw_start()
825 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
847 if (a6xx_info->gmu_chipid) { in a6xx_gmu_fw_start()
848 chipid = a6xx_info->gmu_chipid; in a6xx_gmu_fw_start()
857 chipid = adreno_gpu->chip_id & 0xffff0000; in a6xx_gmu_fw_start()
858 chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */ in a6xx_gmu_fw_start()
859 chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */ in a6xx_gmu_fw_start()
865 (gmu->log.iova & GENMASK(31, 12)) | in a6xx_gmu_fw_start()
866 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); in a6xx_gmu_fw_start()
871 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
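Editorial aside: two bit-packings are in play above. The GMU wants a chip id whose upper 16 bits match chip_id (core.major.minor.patchid, one byte each) but with minor and patchid squeezed into a nibble each; and both log-register writes pack a 4K-aligned IOVA with (pages - 1) in the low bits, the first variant masking explicitly with GENMASK. A standalone restatement in plain C:

    #include <stdint.h>

    static uint32_t gmu_chipid(uint32_t chip_id)
    {
            uint32_t chipid;

            chipid  = chip_id & 0xffff0000;    /* core + major, unchanged */
            chipid |= (chip_id << 4) & 0xf000; /* minor low nibble -> bits 15:12 */
            chipid |= (chip_id << 8) & 0x0f00; /* patchid low nibble -> bits 11:8 */

            return chipid;                     /* e.g. 0x06030001 -> 0x06030100 */
    }

    static uint32_t log_reg(uint32_t iova, uint32_t size)
    {
            /* GENMASK(31, 12) == 0xfffff000 keeps the page-aligned address;
             * GENMASK(7, 0) == 0xff holds pages - 1 (SZ_4K == 4096) */
            return (iova & 0xfffff000u) | ((size / 4096 - 1) & 0xffu);
    }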
881 if (gmu->legacy) { in a6xx_gmu_fw_start()
888 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
914 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
915 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
924 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_off()
931 /* Make sure there are no outstanding RPMh votes */ in a6xx_gmu_rpmh_off()
946 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_force_off()
947 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_force_off()
953 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
966 /* Make sure there are no outstanding RPMh votes */ in a6xx_gmu_force_off()
988 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
990 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_freq()
994 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
1002 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
1004 gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); in a6xx_gmu_set_initial_bw()
1008 dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); in a6xx_gmu_set_initial_bw()
1014 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_resume()
1015 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_resume()
1016 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume()
1019 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
1020 return -EINVAL; in a6xx_gmu_resume()
1022 gmu->hung = false; in a6xx_gmu_resume()
1025 if (!IS_ERR(gmu->qmp)) { in a6xx_gmu_resume()
1026 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", in a6xx_gmu_resume()
1029 dev_err(gmu->dev, "failed to send GPU ACD state\n"); in a6xx_gmu_resume()
1033 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
1040 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
1041 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
1044 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
1045 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? in a6xx_gmu_resume()
1047 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
1049 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1050 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1060 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1066 } else if (gmu->legacy) { in a6xx_gmu_resume()
1091 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1099 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1101 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1102 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1112 if (!gmu->initialized) in a6xx_gmu_isidle()
1127 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_shutdown()
1145 a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); in a6xx_gmu_shutdown()
1165 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1179 /* Tell RPMh to power off the GPU */ in a6xx_gmu_shutdown()
1186 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop()
1187 struct msm_gpu *gpu = &a6xx_gpu->base.base; in a6xx_gmu_stop()
1189 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1196 if (gmu->hung) in a6xx_gmu_stop()
1202 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_gmu_stop()
1209 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1210 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1212 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1214 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1221 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); in a6xx_gmu_memory_free()
1222 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); in a6xx_gmu_memory_free()
1223 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1224 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1225 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); in a6xx_gmu_memory_free()
1226 msm_gem_kernel_put(gmu->log.obj, gmu->aspace); in a6xx_gmu_memory_free()
1228 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1229 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1236 struct drm_device *dev = a6xx_gpu->base.base.dev; in a6xx_gmu_memory_alloc()
1243 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1254 bo->obj = msm_gem_new(dev, size, flags); in a6xx_gmu_memory_alloc()
1255 if (IS_ERR(bo->obj)) in a6xx_gmu_memory_alloc()
1256 return PTR_ERR(bo->obj); in a6xx_gmu_memory_alloc()
1258 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1261 drm_gem_object_put(bo->obj); in a6xx_gmu_memory_alloc()
1265 bo->virt = msm_gem_get_vaddr(bo->obj); in a6xx_gmu_memory_alloc()
1266 bo->size = size; in a6xx_gmu_memory_alloc()
1268 msm_gem_object_set_name(bo->obj, name); in a6xx_gmu_memory_alloc()
1277 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1279 return -ENODEV; in a6xx_gmu_memory_probe()
1283 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1284 if (IS_ERR(gmu->aspace)) in a6xx_gmu_memory_probe()
1285 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1290 /* Return the 'arc-level' for the given frequency */
1327 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1334 if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER)) in a6xx_gmu_rpmh_arc_votes_init()
1341 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1358 "Level %u not found in the RPMh list\n", in a6xx_gmu_rpmh_arc_votes_init()
1364 return -EINVAL; in a6xx_gmu_rpmh_arc_votes_init()
1390 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1391 * to construct the list of votes on the CPU and send it over. Query the RPMh
1398 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_rpmh_votes_init()
1399 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_rpmh_votes_init()
1403 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1404 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1407 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1408 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
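Editorial aside: the two calls above build the GX and CX rail votes by mapping each OPP's required arc level to an index in the level table that cmd-db reports; the "Level %u not found in the RPMh list" error fires when no entry satisfies a level. A kernel-style sketch of that index lookup, with a hypothetical helper name; the real function also folds a secondary (mx.lvl) index into each vote word:

    static int arc_index_for_level(const u16 *levels, int count,
                                   unsigned int level)
    {
            int i;

            /* levels[] is ascending; take the first entry that
             * satisfies the requested level */
            for (i = 0; i < count; i++)
                    if (levels[i] >= level)
                            return i;

            return -EINVAL; /* "Level %u not found in the RPMh list" */
    }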
1428 count = size - 1; in a6xx_gmu_build_freq_table()
1448 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_pwrlevels_probe()
1449 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_gmu_pwrlevels_probe()
1457 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1459 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1463 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1464 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1470 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1471 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1473 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
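Editorial aside: a6xx_gmu_build_freq_table() (only partially shown above) is the standard bounded OPP walk; slot 0 is reserved for the "off" level, which is why only size - 1 OPP entries fit and the truncation path sets `count = size - 1`. A kernel-style sketch close to the driver's own helper, hedged:

    #include <linux/pm_opp.h>

    static int build_freq_table(struct device *dev, unsigned long *freqs,
                                u32 size)
    {
            unsigned long freq = 1;
            struct dev_pm_opp *opp;
            int index = 0;

            freqs[index++] = 0; /* slot 0 is the "off" level */

            while (index < size &&
                   !IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
                    dev_pm_opp_put(opp);
                    freqs[index++] = freq++; /* ceil wrote the OPP rate back */
            }

            return index;
    }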
1475 /* Build the list of RPMh votes that we'll send to the GMU */ in a6xx_gmu_pwrlevels_probe()
1481 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1486 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1488 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1489 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1491 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1492 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1505 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); in a6xx_gmu_get_mmio()
1506 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1509 ret = ioremap(res->start, resource_size(res)); in a6xx_gmu_get_mmio()
1511 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); in a6xx_gmu_get_mmio()
1512 return ERR_PTR(-EINVAL); in a6xx_gmu_get_mmio()
1527 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", in a6xx_gmu_get_irq()
1537 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_remove()
1538 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove()
1539 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1541 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1542 if (!gmu->initialized) { in a6xx_gmu_remove()
1543 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1547 gmu->initialized = false; in a6xx_gmu_remove()
1549 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1551 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1554 * Since cxpd is a virt device, the devlink with gmu-dev will be removed in a6xx_gmu_remove()
1557 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1559 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1560 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1561 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1564 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_remove()
1565 qmp_put(gmu->qmp); in a6xx_gmu_remove()
1567 iounmap(gmu->mmio); in a6xx_gmu_remove()
1569 iounmap(gmu->rscc); in a6xx_gmu_remove()
1570 gmu->mmio = NULL; in a6xx_gmu_remove()
1571 gmu->rscc = NULL; in a6xx_gmu_remove()
1576 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1577 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1581 put_device(gmu->dev); in a6xx_gmu_remove()
1590 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
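Editorial aside: pd_gate is a completion used to observe, not just request, CX power collapse. The notifier completes the gate when genpd reports the domain off; the suspend path re-arms it and waits. A kernel-style sketch of the pattern (names beyond pd_gate/pd_nb/cxpd, which the listing shows, are assumptions):

    static int cxpd_notifier_cb(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);

            if (action == GENPD_NOTIFY_OFF)
                    complete_all(&gmu->pd_gate);

            return 0;
    }

    /* waiter side, e.g. on suspend:
     *
     *      reinit_completion(&gmu->pd_gate);
     *      dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
     *      ... drop the last usage so the domain may power off ...
     *      if (!wait_for_completion_timeout(&gmu->pd_gate,
     *                                       msecs_to_jiffies(5)))
     *              dev_err(gmu->dev, "cx did not collapse\n");
     *      dev_pm_genpd_remove_notifier(gmu->cxpd);
     */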
1598 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init()
1602 return -ENODEV; in a6xx_gmu_wrapper_init()
1604 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1606 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1608 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1611 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1614 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1615 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1616 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1620 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1621 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1622 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1626 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1627 ret = -ENODEV; in a6xx_gmu_wrapper_init()
1631 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1632 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1633 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1636 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1637 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1638 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1642 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1647 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1650 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1653 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1660 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_gmu_init()
1661 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init()
1667 return -ENODEV; in a6xx_gmu_init()
1669 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1671 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1674 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1676 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1689 * For now just hardcode allocations based on the known firmware. in a6xx_gmu_init()
1694 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
1697 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
1702 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
1706 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
1714 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1715 SZ_16M - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
1719 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition in a6xx_gmu_init()
1725 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1726 SZ_256K - SZ_16K, 0x04000, "icache"); in a6xx_gmu_init()
1730 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1731 SZ_256K - SZ_16K, 0x44000, "dcache"); in a6xx_gmu_init()
1736 gmu->legacy = true; in a6xx_gmu_init()
1739 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
1745 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
1750 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
1755 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1756 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1757 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1763 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1764 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
1765 ret = -ENODEV; in a6xx_gmu_init()
1769 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1773 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1774 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1776 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
1777 ret = -ENODEV; in a6xx_gmu_init()
1781 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
1782 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
1783 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
1787 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); in a6xx_gmu_init()
1789 ret = -ENODEV; in a6xx_gmu_init()
1793 gmu->qmp = qmp_get(gmu->dev); in a6xx_gmu_init()
1794 if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) { in a6xx_gmu_init()
1795 ret = PTR_ERR(gmu->qmp); in a6xx_gmu_init()
1799 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
1800 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
1801 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
1807 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
1815 /* Initialize RPMh */ in a6xx_gmu_init()
1818 gmu->initialized = true; in a6xx_gmu_init()
1826 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
1829 iounmap(gmu->mmio); in a6xx_gmu_init()
1831 iounmap(gmu->rscc); in a6xx_gmu_init()
1832 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
1833 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
1839 put_device(gmu->dev); in a6xx_gmu_init()