Lines matching "full:gpu" — code search hits from the MSM Adreno a6xx GPU driver (drivers/gpu/drm/msm/adreno/a6xx_gpu.c)
19 static inline bool _a6xx_check_idle(struct msm_gpu *gpu) in _a6xx_check_idle() argument
21 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in _a6xx_check_idle()
29 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) & in _a6xx_check_idle()
33 return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) & in _a6xx_check_idle()
37 static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_idle() argument
40 if (!adreno_idle(gpu, ring)) in a6xx_idle()
43 if (spin_until(_a6xx_check_idle(gpu))) { in a6xx_idle()
44 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", in a6xx_idle()
45 gpu->name, __builtin_return_address(0), in a6xx_idle()
46 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_idle()
47 gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS), in a6xx_idle()
48 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_idle()
49 gpu_read(gpu, REG_A6XX_CP_RB_WPTR)); in a6xx_idle()
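The hits above trace the a6xx idle handshake: adreno_idle() drains the ringbuffer, then spin_until() polls _a6xx_check_idle(), which masks REG_A6XX_RBBM_STATUS and REG_A6XX_RBBM_INT_0_STATUS. Below is a condensed, hedged sketch of that pattern; it assumes the msm/adreno driver headers (spin_until(), gpu_read(), adreno_idle()) and trims the full diagnostic printout down to a single status register.

	/*
	 * Hedged sketch of the idle poll shown in the hits above; the
	 * in-tree a6xx_idle() also dumps IRQ status and rptr/wptr.
	 */
	static bool a6xx_idle_sketch(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
	{
		/* First wait for the ringbuffer to drain */
		if (!adreno_idle(gpu, ring))
			return false;

		/* Then poll the hardware status until the core reports idle */
		if (spin_until(_a6xx_check_idle(gpu))) {
			DRM_ERROR("%s: timeout waiting for GPU to idle: status %8.8X\n",
				  gpu->name, gpu_read(gpu, REG_A6XX_RBBM_STATUS));
			return false;
		}

		return true;
	}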
56 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_shadow_rptr() argument
58 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in update_shadow_rptr()
69 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_flush() argument
71 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_flush()
76 update_shadow_rptr(gpu, ring); in a6xx_flush()
89 gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); in a6xx_flush()
179 * lingering in that part of the GPU in a6xx_set_pagetable()
207 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a6xx_submit() argument
210 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_submit()
222 * GPU registers so we need to add 0x1a800 to the register value on A630 in a6xx_submit()
261 update_shadow_rptr(gpu, ring); in a6xx_submit()
285 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); in a6xx_submit()
287 a6xx_flush(gpu, ring); in a6xx_submit()
330 static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a7xx_submit() argument
333 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_submit()
351 if (gpu->nr_rings > 1) in a7xx_submit()
396 update_shadow_rptr(gpu, ring); in a7xx_submit()
465 if (gpu->nr_rings > 1) { in a7xx_submit()
484 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); in a7xx_submit()
486 a6xx_flush(gpu, ring); in a7xx_submit()
489 a6xx_preempt_trigger(gpu); in a7xx_submit()
492 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) in a6xx_set_hwcg() argument
494 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_hwcg()
525 gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1); in a6xx_set_hwcg()
526 gpu_write(gpu, REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD, state ? 1 : 0); in a6xx_set_hwcg()
529 gpu_write(gpu, REG_A7XX_RBBM_CGC_P2S_TRIG_CMD, 1); in a6xx_set_hwcg()
531 if (gpu_poll_timeout(gpu, REG_A7XX_RBBM_CGC_P2S_STATUS, val, in a6xx_set_hwcg()
533 dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n"); in a6xx_set_hwcg()
537 gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 0); in a6xx_set_hwcg()
543 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL); in a6xx_set_hwcg()
554 gpu_write(gpu, reg->offset, state ? reg->value : 0); in a6xx_set_hwcg()
560 gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0); in a6xx_set_hwcg()
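The a6xx_set_hwcg() hits read back REG_A6XX_RBBM_CLOCK_CNTL, walk a per-chip register list, and write RBBM_CLOCK_CNTL last (a7xx parts instead toggle the CGC_GLOBAL registers and poll RBBM_CGC_P2S_STATUS). A hedged sketch of the pre-a7xx path follows; the list entry type and the clock_cntl_on value are placeholders, not the driver's real definitions.

	/* Placeholder for the per-chip clock-gating list entry type */
	struct hwcg_reg_sketch { u32 offset; u32 value; };

	/*
	 * Hedged sketch of the legacy HWCG toggle seen above: skip if the
	 * top-level register is already in the requested state, program the
	 * block-level registers, then flip the global switch last.
	 */
	static void set_hwcg_sketch(struct msm_gpu *gpu, const struct hwcg_reg_sketch *hwcg,
				    unsigned int count, u32 clock_cntl_on, bool state)
	{
		unsigned int i;
		u32 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

		if ((state && val == clock_cntl_on) || (!state && val == 0))
			return;

		for (i = 0; i < count; i++)
			gpu_write(gpu, hwcg[i].offset, state ? hwcg[i].value : 0);

		gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
	}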
563 static void a6xx_set_cp_protect(struct msm_gpu *gpu) in a6xx_set_cp_protect() argument
565 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_cp_protect()
574 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, in a6xx_set_cp_protect()
582 gpu_write(gpu, REG_A6XX_CP_PROTECT(i), protect->regs[i]); in a6xx_set_cp_protect()
585 gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]); in a6xx_set_cp_protect()
588 static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) in a6xx_calc_ubwc_config() argument
590 gpu->ubwc_config.rgb565_predicator = 0; in a6xx_calc_ubwc_config()
591 gpu->ubwc_config.uavflagprd_inv = 0; in a6xx_calc_ubwc_config()
592 gpu->ubwc_config.min_acc_len = 0; in a6xx_calc_ubwc_config()
593 gpu->ubwc_config.ubwc_swizzle = 0x6; in a6xx_calc_ubwc_config()
594 gpu->ubwc_config.macrotile_mode = 0; in a6xx_calc_ubwc_config()
595 gpu->ubwc_config.highest_bank_bit = 15; in a6xx_calc_ubwc_config()
597 if (adreno_is_a610(gpu)) { in a6xx_calc_ubwc_config()
598 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
599 gpu->ubwc_config.min_acc_len = 1; in a6xx_calc_ubwc_config()
600 gpu->ubwc_config.ubwc_swizzle = 0x7; in a6xx_calc_ubwc_config()
603 if (adreno_is_a618(gpu)) in a6xx_calc_ubwc_config()
604 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
606 if (adreno_is_a619(gpu)) in a6xx_calc_ubwc_config()
608 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
610 if (adreno_is_a619_holi(gpu)) in a6xx_calc_ubwc_config()
611 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
613 if (adreno_is_a621(gpu)) { in a6xx_calc_ubwc_config()
614 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
615 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
616 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
619 if (adreno_is_a640_family(gpu)) in a6xx_calc_ubwc_config()
620 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
622 if (adreno_is_a680(gpu)) in a6xx_calc_ubwc_config()
623 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
625 if (adreno_is_a650(gpu) || in a6xx_calc_ubwc_config()
626 adreno_is_a660(gpu) || in a6xx_calc_ubwc_config()
627 adreno_is_a690(gpu) || in a6xx_calc_ubwc_config()
628 adreno_is_a730(gpu) || in a6xx_calc_ubwc_config()
629 adreno_is_a740_family(gpu)) { in a6xx_calc_ubwc_config()
631 gpu->ubwc_config.highest_bank_bit = 16; in a6xx_calc_ubwc_config()
632 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
633 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
634 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
635 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
638 if (adreno_is_a663(gpu)) { in a6xx_calc_ubwc_config()
639 gpu->ubwc_config.highest_bank_bit = 13; in a6xx_calc_ubwc_config()
640 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
641 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
642 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
643 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
644 gpu->ubwc_config.ubwc_swizzle = 0x4; in a6xx_calc_ubwc_config()
647 if (adreno_is_7c3(gpu)) { in a6xx_calc_ubwc_config()
648 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
649 gpu->ubwc_config.amsbc = 1; in a6xx_calc_ubwc_config()
650 gpu->ubwc_config.rgb565_predicator = 1; in a6xx_calc_ubwc_config()
651 gpu->ubwc_config.uavflagprd_inv = 2; in a6xx_calc_ubwc_config()
652 gpu->ubwc_config.macrotile_mode = 1; in a6xx_calc_ubwc_config()
655 if (adreno_is_a702(gpu)) { in a6xx_calc_ubwc_config()
656 gpu->ubwc_config.highest_bank_bit = 14; in a6xx_calc_ubwc_config()
657 gpu->ubwc_config.min_acc_len = 1; in a6xx_calc_ubwc_config()
661 static void a6xx_set_ubwc_config(struct msm_gpu *gpu) in a6xx_set_ubwc_config() argument
663 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_ubwc_config()
676 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, in a6xx_set_ubwc_config()
683 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, in a6xx_set_ubwc_config()
688 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, in a6xx_set_ubwc_config()
695 gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL, in a6xx_set_ubwc_config()
698 gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, in a6xx_set_ubwc_config()
701 gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL, in a6xx_set_ubwc_config()
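a6xx_calc_ubwc_config() fills a table of UBWC parameters (highest_bank_bit, amsbc, rgb565_predicator, uavflagprd_inv, ...) with defaults and then per-chip overrides, and a6xx_set_ubwc_config() folds them into the various *_NC_MODE_CNTL registers shown above. The sketch below illustrates how the bank-bit value is commonly turned into register fields; the -13 bias, the hi/lo split, and the shift positions are assumptions about the encoding, not taken from the hits.

	/*
	 * Hedged sketch: deriving a bank-bit field from ubwc_config.
	 * The bias and packing are assumed; the real shift positions
	 * differ between the NC_MODE_CNTL registers.
	 */
	static u32 ubwc_hbb_field_sketch(const struct adreno_gpu *adreno_gpu)
	{
		u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;	/* assumed bias */
		u32 hbb_hi = hbb >> 2;	/* assumed upper bit of the field */
		u32 hbb_lo = hbb & 3;	/* assumed lower two bits */

		/* Placeholder packing only */
		return (hbb_hi << 10) | (hbb_lo << 1);
	}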
705 static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu) in a7xx_patch_pwrup_reglist() argument
707 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_patch_pwrup_reglist()
723 * register value into the GPU buffer in a7xx_patch_pwrup_reglist()
727 *dest++ = gpu_read(gpu, reglist->regs[i]); in a7xx_patch_pwrup_reglist()
748 static int a7xx_preempt_start(struct msm_gpu *gpu) in a7xx_preempt_start() argument
750 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_preempt_start()
752 struct msm_ringbuffer *ring = gpu->rb[0]; in a7xx_preempt_start()
754 if (gpu->nr_rings <= 1) in a7xx_preempt_start()
771 a6xx_flush(gpu, ring); in a7xx_preempt_start()
773 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a7xx_preempt_start()
776 static int a6xx_cp_init(struct msm_gpu *gpu) in a6xx_cp_init() argument
778 struct msm_ringbuffer *ring = gpu->rb[0]; in a6xx_cp_init()
801 a6xx_flush(gpu, ring); in a6xx_cp_init()
802 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a6xx_cp_init()
805 static int a7xx_cp_init(struct msm_gpu *gpu) in a7xx_cp_init() argument
807 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a7xx_cp_init()
809 struct msm_ringbuffer *ring = gpu->rb[0]; in a7xx_cp_init()
852 a6xx_flush(gpu, ring); in a7xx_cp_init()
853 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a7xx_cp_init()
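Both CP init paths above end with the same handshake: emit the init packet on ring 0, a6xx_flush() to kick the CP, then a6xx_idle() to confirm it consumed the packet. A hedged sketch of that tail; the CP_ME_INIT ordinals written to the ring are firmware specific and are not reproduced here.

	/*
	 * Hedged sketch of the CP init tail shown above; the init packet
	 * contents are elided.
	 */
	static int cp_init_tail_sketch(struct msm_gpu *gpu)
	{
		struct msm_ringbuffer *ring = gpu->rb[0];

		/* ... CP_ME_INIT packet emitted into 'ring' here ... */

		a6xx_flush(gpu, ring);
		return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
	}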
864 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_ucode_check_version() local
904 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
913 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
919 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
920 "unknown GPU, add it to a6xx_ucode_check_version()!!\n"); in a6xx_ucode_check_version()
927 static int a6xx_ucode_load(struct msm_gpu *gpu) in a6xx_ucode_load() argument
929 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_ucode_load()
933 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu, in a6xx_ucode_load()
940 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_load()
948 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_ucode_load()
962 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in a6xx_ucode_load()
963 sizeof(u32) * gpu->nr_rings, in a6xx_ucode_load()
965 gpu->aspace, &a6xx_gpu->shadow_bo, in a6xx_ucode_load()
974 a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, in a6xx_ucode_load()
976 gpu->aspace, &a6xx_gpu->pwrup_reglist_bo, in a6xx_ucode_load()
987 static int a6xx_zap_shader_init(struct msm_gpu *gpu) in a6xx_zap_shader_init() argument
995 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); in a6xx_zap_shader_init()
1037 static int hw_init(struct msm_gpu *gpu) in hw_init() argument
1039 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in hw_init()
1047 /* Make sure the GMU keeps the GPU on while we set it up */ in hw_init()
1055 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); in hw_init()
1056 gpu_read(gpu, REG_A6XX_GBIF_HALT); in hw_init()
1058 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0); in hw_init()
1059 gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL); in hw_init()
1061 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); in hw_init()
1062 gpu_read(gpu, REG_A6XX_GBIF_HALT); in hw_init()
1064 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); in hw_init()
1065 gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT); in hw_init()
1068 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); in hw_init()
1078 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000); in hw_init()
1079 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); in hw_init()
1083 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); in hw_init()
1084 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); in hw_init()
1085 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); in hw_init()
1086 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); in hw_init()
1087 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); in hw_init()
1088 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); in hw_init()
1089 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); in hw_init()
1090 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); in hw_init()
1091 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); in hw_init()
1092 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); in hw_init()
1093 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); in hw_init()
1094 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); in hw_init()
1098 a6xx_set_hwcg(gpu, true); in hw_init()
1105 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); in hw_init()
1106 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); in hw_init()
1107 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); in hw_init()
1108 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); in hw_init()
1109 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, in hw_init()
1112 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); in hw_init()
1116 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); in hw_init()
1119 gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0); in hw_init()
1121 /* Make all blocks contribute to the GPU BUSY perf counter */ in hw_init()
1122 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); in hw_init()
1126 gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base); in hw_init()
1127 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base); in hw_init()
1129 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0); in hw_init()
1130 gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base); in hw_init()
1131 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base); in hw_init()
1139 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ in hw_init()
1140 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min); in hw_init()
1142 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX, in hw_init()
1147 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23)); in hw_init()
1149 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); in hw_init()
1150 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); in hw_init()
1154 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); in hw_init()
1155 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1157 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060); in hw_init()
1158 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16); in hw_init()
1160 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); in hw_init()
1161 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1165 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); in hw_init()
1169 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48); in hw_init()
1170 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47); in hw_init()
1172 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 64); in hw_init()
1173 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 63); in hw_init()
1175 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); in hw_init()
1180 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, in hw_init()
1184 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); in hw_init()
1187 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1); in hw_init()
1196 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT); in hw_init()
1198 a6xx_set_ubwc_config(gpu); in hw_init()
1203 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff); in hw_init()
1205 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff); in hw_init()
1207 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff); in hw_init()
1209 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff); in hw_init()
1211 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff); in hw_init()
1213 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1); in hw_init()
1217 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); in hw_init()
1218 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, in hw_init()
1220 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, in hw_init()
1222 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, in hw_init()
1224 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, in hw_init()
1236 a6xx_set_cp_protect(gpu); in hw_init()
1240 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801); in hw_init()
1242 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); in hw_init()
1243 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); in hw_init()
1246 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(24)); in hw_init()
1250 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); in hw_init()
1251 /* Set dualQ + disable afull for A660 GPU */ in hw_init()
1253 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); in hw_init()
1255 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, in hw_init()
1263 if (gpu->hw_apriv) { in hw_init()
1265 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, in hw_init()
1267 gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL, in hw_init()
1269 gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL, in hw_init()
1272 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, in hw_init()
1278 gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(19), BIT(19)); in hw_init()
1281 gpu_write(gpu, REG_A6XX_TPL1_DBG_ECO_CNTL1, 0xc0700); in hw_init()
1284 gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(11), BIT(11)); in hw_init()
1288 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, in hw_init()
1291 ret = adreno_hw_init(gpu); in hw_init()
1295 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); in hw_init()
1298 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); in hw_init()
1305 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); in hw_init()
1307 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, in hw_init()
1312 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, in hw_init()
1313 shadowptr(a6xx_gpu, gpu->rb[0])); in hw_init()
1314 for (unsigned int i = 0; i < gpu->nr_rings; i++) in hw_init()
1320 gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR, in hw_init()
1321 rbmemptr(gpu->rb[0], bv_rptr)); in hw_init()
1324 a6xx_preempt_hw_init(gpu); in hw_init()
1327 a6xx_gpu->cur_ring = gpu->rb[0]; in hw_init()
1329 for (i = 0; i < gpu->nr_rings; i++) in hw_init()
1330 gpu->rb[i]->cur_ctx_seqno = 0; in hw_init()
1333 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); in hw_init()
1336 a7xx_patch_pwrup_reglist(gpu); in hw_init()
1340 ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu); in hw_init()
1351 ret = a6xx_zap_shader_init(gpu); in hw_init()
1353 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in hw_init()
1354 OUT_RING(gpu->rb[0], 0x00000000); in hw_init()
1356 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1357 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1366 dev_warn_once(gpu->dev->dev, in hw_init()
1368 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); in hw_init()
1379 a7xx_preempt_start(gpu); in hw_init()
1382 * Tell the GMU that we are done touching the GPU and it can start power in hw_init()
1395 static int a6xx_hw_init(struct msm_gpu *gpu) in a6xx_hw_init() argument
1397 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_hw_init()
1402 ret = hw_init(gpu); in a6xx_hw_init()
1408 static void a6xx_dump(struct msm_gpu *gpu) in a6xx_dump() argument
1410 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", in a6xx_dump()
1411 gpu_read(gpu, REG_A6XX_RBBM_STATUS)); in a6xx_dump()
1412 adreno_dump(gpu); in a6xx_dump()
1415 static void a6xx_recover(struct msm_gpu *gpu) in a6xx_recover() argument
1417 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_recover()
1422 adreno_dump_info(gpu); in a6xx_recover()
1425 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, in a6xx_recover()
1426 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); in a6xx_recover()
1429 a6xx_dump(gpu); in a6xx_recover()
1438 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); in a6xx_recover()
1440 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1443 mutex_lock(&gpu->active_lock); in a6xx_recover()
1444 active_submits = gpu->active_submits; in a6xx_recover()
1450 gpu->active_submits = 0; in a6xx_recover()
1456 /* Reset the GPU to a clean state */ in a6xx_recover()
1457 a6xx_gpu_sw_reset(gpu, true); in a6xx_recover()
1458 a6xx_gpu_sw_reset(gpu, false); in a6xx_recover()
1467 pm_runtime_put(&gpu->pdev->dev); in a6xx_recover()
1470 pm_runtime_put_sync(&gpu->pdev->dev); in a6xx_recover()
1473 DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); in a6xx_recover()
1477 pm_runtime_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1480 pm_runtime_get(&gpu->pdev->dev); in a6xx_recover()
1482 pm_runtime_get_sync(&gpu->pdev->dev); in a6xx_recover()
1484 gpu->active_submits = active_submits; in a6xx_recover()
1485 mutex_unlock(&gpu->active_lock); in a6xx_recover()
1487 msm_gpu_hw_init(gpu); in a6xx_recover()
1491 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid) in a6xx_uche_fault_block() argument
1493 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_uche_fault_block()
1511 val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF); in a6xx_uche_fault_block()
1588 static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id) in a6xx_fault_block() argument
1590 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_fault_block()
1603 return a6xx_uche_fault_block(gpu, id); in a6xx_fault_block()
1608 struct msm_gpu *gpu = arg; in a6xx_fault_handler() local
1613 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), in a6xx_fault_handler()
1614 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), in a6xx_fault_handler()
1615 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), in a6xx_fault_handler()
1616 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)), in a6xx_fault_handler()
1620 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); in a6xx_fault_handler()
1622 return adreno_fault_handler(gpu, iova, flags, info, block, scratch); in a6xx_fault_handler()
1625 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) in a6xx_cp_hw_err_irq() argument
1627 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS); in a6xx_cp_hw_err_irq()
1632 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1); in a6xx_cp_hw_err_irq()
1633 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA); in a6xx_cp_hw_err_irq()
1634 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1640 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1644 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", in a6xx_cp_hw_err_irq()
1645 gpu_read(gpu, REG_A6XX_CP_HW_FAULT)); in a6xx_cp_hw_err_irq()
1648 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS); in a6xx_cp_hw_err_irq()
1650 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1656 if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu))) in a6xx_cp_hw_err_irq()
1657 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); in a6xx_cp_hw_err_irq()
1660 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); in a6xx_cp_hw_err_irq()
1663 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); in a6xx_cp_hw_err_irq()
1667 static void a6xx_fault_detect_irq(struct msm_gpu *gpu) in a6xx_fault_detect_irq() argument
1669 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_fault_detect_irq()
1671 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
1674 * If stalled on SMMU fault, we could trip the GPU's hang detection, in a6xx_fault_detect_irq()
1679 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) in a6xx_fault_detect_irq()
1683 * Force the GPU to stay on until after we finish in a6xx_fault_detect_irq()
1689 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_fault_detect_irq()
1690 …"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", in a6xx_fault_detect_irq()
1692 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_fault_detect_irq()
1693 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_fault_detect_irq()
1694 gpu_read(gpu, REG_A6XX_CP_RB_WPTR), in a6xx_fault_detect_irq()
1695 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), in a6xx_fault_detect_irq()
1696 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_fault_detect_irq()
1697 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), in a6xx_fault_detect_irq()
1698 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); in a6xx_fault_detect_irq()
1701 del_timer(&gpu->hangcheck_timer); in a6xx_fault_detect_irq()
1703 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
1706 static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu) in a7xx_sw_fuse_violation_irq() argument
1710 status = gpu_read(gpu, REG_A7XX_RBBM_SW_FUSE_INT_STATUS); in a7xx_sw_fuse_violation_irq()
1711 gpu_write(gpu, REG_A7XX_RBBM_SW_FUSE_INT_MASK, 0); in a7xx_sw_fuse_violation_irq()
1713 dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status); in a7xx_sw_fuse_violation_irq()
1721 del_timer(&gpu->hangcheck_timer); in a7xx_sw_fuse_violation_irq()
1723 kthread_queue_work(gpu->worker, &gpu->recover_work); in a7xx_sw_fuse_violation_irq()
1727 static irqreturn_t a6xx_irq(struct msm_gpu *gpu) in a6xx_irq() argument
1729 struct msm_drm_private *priv = gpu->dev->dev_private; in a6xx_irq()
1730 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); in a6xx_irq()
1732 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); in a6xx_irq()
1738 a6xx_fault_detect_irq(gpu); in a6xx_irq()
1741 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); in a6xx_irq()
1744 a6xx_cp_hw_err_irq(gpu); in a6xx_irq()
1747 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); in a6xx_irq()
1750 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); in a6xx_irq()
1753 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); in a6xx_irq()
1756 a7xx_sw_fuse_violation_irq(gpu); in a6xx_irq()
1759 msm_gpu_retire(gpu); in a6xx_irq()
1760 a6xx_preempt_trigger(gpu); in a6xx_irq()
1764 a6xx_preempt_irq(gpu); in a6xx_irq()
1778 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_llc_activate() local
1795 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | in a6xx_llc_activate()
1816 * Program the slice IDs for the various GPU blocks and GPU MMU in a6xx_llc_activate()
1832 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); in a6xx_llc_activate()
1838 struct msm_gpu *gpu = &adreno_gpu->base; in a7xx_llc_activate() local
1848 gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, in a7xx_llc_activate()
1856 gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, in a7xx_llc_activate()
1907 struct msm_gpu *gpu = &adreno_gpu->base; in a7xx_cx_mem_init() local
1921 dev_warn_once(gpu->dev->dev, in a7xx_cx_mem_init()
1962 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_bus_clear_pending_transactions() local
1965 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST); in a6xx_bus_clear_pending_transactions()
1966 spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) & in a6xx_bus_clear_pending_transactions()
1969 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK); in a6xx_bus_clear_pending_transactions()
1970 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & in a6xx_bus_clear_pending_transactions()
1972 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); in a6xx_bus_clear_pending_transactions()
1979 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); in a6xx_bus_clear_pending_transactions()
1980 spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); in a6xx_bus_clear_pending_transactions()
1984 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); in a6xx_bus_clear_pending_transactions()
1985 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & in a6xx_bus_clear_pending_transactions()
1989 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); in a6xx_bus_clear_pending_transactions()
1990 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & in a6xx_bus_clear_pending_transactions()
1994 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); in a6xx_bus_clear_pending_transactions()
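a6xx_bus_clear_pending_transactions() shows the GBIF drain sequence used on newer parts: request the client halt, spin until the ACK bits latch, repeat for the arbiter, then release the halt. A hedged sketch of just that GBIF half, assuming the GBIF_CLIENT_HALT_MASK / GBIF_ARB_HALT_MASK macros from the driver headers.

	/* Hedged sketch of the GBIF halt/drain steps seen in the hits above */
	static void gbif_halt_sketch(struct msm_gpu *gpu)
	{
		/* Halt new client traffic and wait for it to drain */
		gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
		spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
				GBIF_CLIENT_HALT_MASK) == GBIF_CLIENT_HALT_MASK);

		/* Then halt the arbiter side */
		gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
		spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
				GBIF_ARB_HALT_MASK) == GBIF_ARB_HALT_MASK);

		/* Release the halt so the GPU can be reset or reinitialized */
		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
	}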
1997 void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert) in a6xx_gpu_sw_reset() argument
2000 if (adreno_is_a610(to_adreno_gpu(gpu))) in a6xx_gpu_sw_reset()
2003 gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert); in a6xx_gpu_sw_reset()
2005 gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD); in a6xx_gpu_sw_reset()
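a6xx_gpu_sw_reset() is small: skip on A610 (reset differently), write RBBM_SW_RESET_CMD, and read it back so the write posts before the caller continues. A hedged sketch; the delay after asserting reset is an assumption about settle time, not visible in the hits.

	/* Hedged sketch of the soft-reset helper above */
	static void gpu_sw_reset_sketch(struct msm_gpu *gpu, bool assert)
	{
		/* A610 is handled elsewhere, as in the hits above */
		if (adreno_is_a610(to_adreno_gpu(gpu)))
			return;

		gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
		/* Dummy read so the write posts before we proceed */
		gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);

		if (assert)
			udelay(100);	/* assumed settle time */
	}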
2013 static int a6xx_gmu_pm_resume(struct msm_gpu *gpu) in a6xx_gmu_pm_resume() argument
2015 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_pm_resume()
2019 gpu->needs_hw_init = true; in a6xx_gmu_pm_resume()
2029 msm_devfreq_resume(gpu); in a6xx_gmu_pm_resume()
2036 static int a6xx_pm_resume(struct msm_gpu *gpu) in a6xx_pm_resume() argument
2038 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_resume()
2041 unsigned long freq = gpu->fast_rate; in a6xx_pm_resume()
2045 gpu->needs_hw_init = true; in a6xx_pm_resume()
2051 opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq); in a6xx_pm_resume()
2059 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_pm_resume()
2064 ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_resume()
2071 /* If anything goes south, tear the GPU down piece by piece.. */ in a6xx_pm_resume()
2076 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_resume()
2082 msm_devfreq_resume(gpu); in a6xx_pm_resume()
2087 static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu) in a6xx_gmu_pm_suspend() argument
2089 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_pm_suspend()
2097 msm_devfreq_suspend(gpu); in a6xx_gmu_pm_suspend()
2106 for (i = 0; i < gpu->nr_rings; i++) in a6xx_gmu_pm_suspend()
2109 gpu->suspend_count++; in a6xx_gmu_pm_suspend()
2114 static int a6xx_pm_suspend(struct msm_gpu *gpu) in a6xx_pm_suspend() argument
2116 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_suspend()
2123 msm_devfreq_suspend(gpu); in a6xx_pm_suspend()
2133 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_suspend()
2136 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_suspend()
2142 for (i = 0; i < gpu->nr_rings; i++) in a6xx_pm_suspend()
2145 gpu->suspend_count++; in a6xx_pm_suspend()
2150 static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_gmu_get_timestamp() argument
2152 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_get_timestamp()
2157 /* Force the GPU power on so we can read this register */ in a6xx_gmu_get_timestamp()
2160 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); in a6xx_gmu_get_timestamp()
2169 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_get_timestamp() argument
2171 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); in a6xx_get_timestamp()
2175 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) in a6xx_active_ring() argument
2177 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_active_ring()
2183 static void a6xx_destroy(struct msm_gpu *gpu) in a6xx_destroy() argument
2185 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_destroy()
2189 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_destroy()
2194 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); in a6xx_destroy()
2207 static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) in a6xx_gpu_busy() argument
2209 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_busy()
2223 static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, in a6xx_gpu_set_freq() argument
2226 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_set_freq()
2230 a6xx_gmu_set_freq(gpu, opp, suspended); in a6xx_gpu_set_freq()
2235 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) in a6xx_create_address_space() argument
2237 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_create_address_space()
2242 * This allows GPU to set the bus attributes required to use system in a6xx_create_address_space()
2249 return adreno_iommu_create_address_space(gpu, pdev, quirks); in a6xx_create_address_space()
2253 a6xx_create_private_address_space(struct msm_gpu *gpu) in a6xx_create_private_address_space() argument
2257 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); in a6xx_create_private_address_space()
2263 "gpu", 0x100000000ULL, in a6xx_create_private_address_space()
2264 adreno_private_address_space_size(gpu)); in a6xx_create_private_address_space()
2267 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_get_rptr() argument
2269 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_get_rptr()
2275 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); in a6xx_get_rptr()
2278 static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_progress() argument
2281 .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), in a6xx_progress()
2282 .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), in a6xx_progress()
2283 .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_progress()
2284 .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE), in a6xx_progress()
2301 cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16; in a6xx_progress()
2302 cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16; in a6xx_progress()
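a6xx_progress() snapshots the CP indirect-buffer state and credits back the ROQ-available counts so data already prefetched by the CP does not look like stalled progress. A hedged sketch of the comparison step; the last_cp_state field name and the memcmp comparison are assumptions about how the driver tracks the previous snapshot.

	/* Hedged sketch of the hangcheck progress test built from the hits above */
	static bool progress_sketch(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
	{
		struct msm_cp_state cp_state = {
			.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
			.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
			.ib1_rem  = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
			.ib2_rem  = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
		};
		bool progress;

		/* Credit back data the CP has already prefetched into the ROQ */
		cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16;
		cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16;

		/* Any change since the previous hangcheck tick counts as progress */
		progress = memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)) != 0;
		ring->last_cp_state = cp_state;

		return progress;
	}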
2457 struct msm_gpu *gpu; in a6xx_gpu_init() local
2467 gpu = &adreno_gpu->base; in a6xx_gpu_init()
2483 /* gpu->info only gets assigned in adreno_gpu_init() */ in a6xx_gpu_init()
2538 if (gpu->aspace) in a6xx_gpu_init()
2539 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, in a6xx_gpu_init()
2544 a6xx_preempt_init(gpu); in a6xx_gpu_init()
2546 return gpu; in a6xx_gpu_init()