Searched refs:gmu (Results 1 – 25 of 34) sorted by relevance

/linux/drivers/gpu/drm/msm/adreno/
a6xx_gmu.c
21 static void a6xx_gmu_fault(struct a6xx_gmu *gmu) in a6xx_gmu_fault() argument
23 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fault()
28 gmu->hung = true; in a6xx_gmu_fault()
39 struct a6xx_gmu *gmu = data; in a6xx_gmu_irq() local
42 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); in a6xx_gmu_irq()
43 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); in a6xx_gmu_irq()
46 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
48 a6xx_gmu_fault(gmu); in a6xx_gmu_irq()
52 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
55 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
[all …]
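
The a6xx_gmu.c hits above are the GMU interrupt path: the handler latches REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS, writes the same value to the CLR register to acknowledge it, then reports each cause (watchdog expiry, AHB bus error, fence error). A minimal sketch of that latch/ack/dispatch shape, with fake_* names invented for this illustration in place of the real accessors and bit masks:

#include <stdint.h>
#include <stdio.h>

/* Stand-in cause bits; the real REG_A6XX_GMU_AO_HOST_INTERRUPT_* masks differ. */
#define FAKE_IRQ_WDOG   (1u << 0)
#define FAKE_IRQ_AHB    (1u << 1)
#define FAKE_IRQ_FENCE  (1u << 2)

static void fake_gmu_irq(uint32_t (*read_status)(void), void (*write_clear)(uint32_t))
{
	uint32_t status = read_status();	/* latch every pending cause at once */

	write_clear(status);			/* acknowledge exactly what was latched */

	if (status & FAKE_IRQ_WDOG)
		fprintf(stderr, "GMU watchdog expired\n");	/* driver also marks the GMU hung */
	if (status & FAKE_IRQ_AHB)
		fprintf(stderr, "GMU AHB bus error\n");
	if (status & FAKE_IRQ_FENCE)
		fprintf(stderr, "GMU fence error\n");
}
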
a6xx_gmu.h
135 #define GMU_BYTE_OFFSET(gmu, offset) (((offset) << 2) - (gmu)->mmio_offset) argument
137 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) in gmu_read() argument
140 return readl(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset)); in gmu_read()
143 static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value) in gmu_write() argument
145 writel(value, gmu->mmio + GMU_BYTE_OFFSET(gmu, offset)); in gmu_write()
149 gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size) in gmu_write_bulk() argument
151 memcpy_toio(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset), data, size); in gmu_write_bulk()
155 static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or) in gmu_rmw() argument
157 u32 val = gmu_read(gmu, reg); in gmu_rmw()
161 gmu_write(gmu, reg, val | or); in gmu_rmw()
[all …]
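
The a6xx_gmu.h hits show the accessor layer that every other gmu_*() call in these results builds on: register offsets are 32-bit word indices, GMU_BYTE_OFFSET() turns them into a byte offset into the mapped window, and gmu_rmw() composes a read-modify-write out of gmu_read() and gmu_write(). A standalone sketch of the same pattern over a plain mapped word array, using invented fake_* names:

#include <stdint.h>

struct fake_gmu {
	volatile uint32_t *mmio;	/* mapped register window, indexed in 32-bit words */
};

static inline uint32_t fake_read(struct fake_gmu *gmu, uint32_t offset)
{
	return gmu->mmio[offset];	/* the kernel helper instead shifts the word offset left by 2 to index bytes */
}

static inline void fake_write(struct fake_gmu *gmu, uint32_t offset, uint32_t value)
{
	gmu->mmio[offset] = value;
}

/* Read-modify-write, mirroring gmu_rmw(): clear the bits in mask, then OR in or_val. */
static inline void fake_rmw(struct fake_gmu *gmu, uint32_t reg, uint32_t mask, uint32_t or_val)
{
	uint32_t val = fake_read(gmu, reg);

	fake_write(gmu, reg, (val & ~mask) | or_val);
}
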
a6xx_hfi.c
31 static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, in a6xx_hfi_queue_read() argument
62 if (!gmu->legacy) in a6xx_hfi_queue_read()
69 static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, in a6xx_hfi_queue_write() argument
93 if (!gmu->legacy) { in a6xx_hfi_queue_write()
101 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); in a6xx_hfi_queue_write()
105 static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum) in a6xx_hfi_wait_for_msg_interrupt() argument
109 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_hfi_wait_for_msg_interrupt()
113 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_hfi_wait_for_msg_interrupt()
130 DRM_DEV_ERROR(gmu->dev, in a6xx_hfi_wait_for_msg_interrupt()
137 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, in a6xx_hfi_wait_for_msg_interrupt()
[all …]
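
The a6xx_hfi.c hits show the host-to-GMU message path: a queue write is followed by ringing the HOST2GMU doorbell register, and the reply is detected by polling GMU2HOST_INTR_INFO with gmu_poll_timeout() before clearing the bit. A sketch of that bounded-poll step; poll_fn() and now_us() are invented stand-ins for the register read and the clock source:

#include <stdint.h>

/* Poll until all of bits are set in the status word or timeout_us elapses,
 * roughly what gmu_poll_timeout() does for the GMU2HOST interrupt bit in
 * a6xx_hfi_wait_for_msg_interrupt(). */
static int fake_poll_for_bits(uint32_t (*poll_fn)(void), uint32_t bits,
			      uint64_t (*now_us)(void), uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	while ((poll_fn() & bits) != bits) {
		if (now_us() > deadline)
			return -1;	/* timed out; the caller logs the failure */
	}

	return 0;
}
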
a6xx_gpu.c
24 count_hi = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H); in read_gmu_ao_counter()
25 count_lo = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L); in read_gmu_ao_counter()
26 temp = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H); in read_gmu_ao_counter()
54 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in fenced_write() local
68 if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status, in fenced_write()
76 if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status, in fenced_write()
83 dev_err_ratelimited(gmu->dev, "delay in fenced register write (0x%x)\n", in fenced_write()
88 dev_err_ratelimited(gmu->dev, "fenced register write (0x%x) fail\n", in fenced_write()
116 if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu)) in _a6xx_check_idle()
629 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_set_hwcg() local
[all …]
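
The read_gmu_ao_counter() hits in a6xx_gpu.c assemble a 64-bit always-on counter from two 32-bit registers, re-reading the high word to catch a low-word rollover between the two reads. The same hi/lo/hi technique in isolation, with read_hi()/read_lo() standing in for the gmu_read() calls on the ALWAYS_ON_COUNTER_H/L registers:

#include <stdint.h>

static uint64_t fake_read_counter64(uint32_t (*read_hi)(void), uint32_t (*read_lo)(void))
{
	uint32_t hi, lo, check;

	do {
		hi = read_hi();
		lo = read_lo();
		check = read_hi();	/* if this differs from hi, the low word wrapped: retry */
	} while (hi != check);

	return ((uint64_t)hi << 32) | lo;
}
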
a8xx_gpu.c
126 if (!a6xx_gmu_isidle(&a6xx_gpu->gmu)) in _a8xx_check_idle()
190 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a8xx_set_hwcg() local
196 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, in a8xx_set_hwcg()
198 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, in a8xx_set_hwcg()
200 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, in a8xx_set_hwcg()
494 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in hw_init() local
500 ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
545 gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1, in hw_init()
584 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); in hw_init()
587 gmu_rmw(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0, 0xff, BIT(5)); in hw_init()
[all …]
a6xx_gpu.h
92 struct a6xx_gmu gmu; member
264 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
266 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
268 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
269 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
a6xx_gpu_state.c
155 if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu)) in a6xx_crashdumper_run()
1198 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in _a6xx_get_gmu_registers() local
1218 val = gmu_read_rscc(gmu, offset); in _a6xx_get_gmu_registers()
1220 val = gmu_read(gmu, offset); in _a6xx_get_gmu_registers()
1254 if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) in a6xx_get_gmu_registers()
1258 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_get_gmu_registers()
1292 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_snapshot_gmu_hfi_history() local
1295 BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history)); in a6xx_snapshot_gmu_hfi_history()
1297 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { in a6xx_snapshot_gmu_hfi_history()
1298 struct a6xx_hfi_queue *queue = &gmu->queues[i]; in a6xx_snapshot_gmu_hfi_history()
[all …]
a6xx_preempt.c
151 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, on); in a6xx_preempt_keepalive_vote()
/linux/arch/arm64/boot/dts/qcom/
purwa.dtsi
29 &gmu {
30 compatible = "qcom,adreno-gmu-x145.0", "qcom,adreno-gmu";
sdm670.dtsi
1375 qcom,gmu = <&gmu>;
1486 gmu: gmu@506a000 { label
1487 compatible = "qcom,adreno-gmu-615.0", "qcom,adreno-gmu";
1492 reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
1496 interrupt-names = "hfi", "gmu";
1502 clock-names = "gmu", "cxo", "axi", "memnoc";
sm6350.dtsi
1458 qcom,gmu = <&gmu>;
1542 gmu: gmu@3d6a000 { label
1543 compatible = "qcom,adreno-gmu-619.0", "qcom,adreno-gmu";
1547 reg-names = "gmu",
1554 "gmu";
1562 "gmu",
sm8350.dtsi
2049 qcom,gmu = <&gmu>;
2114 gmu: gmu@3d6a000 { label
2115 compatible = "qcom,adreno-gmu-660.1", "qcom,adreno-gmu";
2120 reg-names = "gmu", "rscc", "gmu_pdc";
2124 interrupt-names = "hfi", "gmu";
2133 clock-names = "gmu",
sc8180x.dtsi
2277 qcom,gmu = <&gmu>;
2326 gmu: gmu@2c6a000 { label
2327 compatible = "qcom,adreno-gmu-680.1", "qcom,adreno-gmu";
2332 reg-names = "gmu",
2338 interrupt-names = "hfi", "gmu";
2345 clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
sc7180.dtsi
2178 qcom,gmu = <&gmu>;
2276 gmu: gmu@506a000 { label
2277 compatible = "qcom,adreno-gmu-618.0", "qcom,adreno-gmu";
2280 reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
2283 interrupt-names = "hfi", "gmu";
2288 clock-names = "gmu", "cxo", "axi", "memnoc";
sm8150-mtp.dts
353 &gmu {
sm8150.dtsi
2250 qcom,gmu = <&gmu>;
2303 gmu: gmu@2c6a000 { label
2304 compatible = "qcom,adreno-gmu-640.1", "qcom,adreno-gmu";
2309 reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
2313 interrupt-names = "hfi", "gmu";
2320 clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
agatti.dtsi
1609 "gmu",
1620 qcom,gmu = <&gmu_wrapper>;
1689 gmu_wrapper: gmu@596a000 {
1690 compatible = "qcom,adreno-gmu-wrapper";
1692 reg-names = "gmu";
sm8450.dtsi
2457 qcom,gmu = <&gmu>;
2531 gmu: gmu@3d6a000 { label
2532 compatible = "qcom,adreno-gmu-730.1", "qcom,adreno-gmu";
2536 reg-names = "gmu", "rscc", "gmu_pdc";
2540 interrupt-names = "hfi", "gmu";
2550 "gmu",
2632 clock-names = "gmu",
sm6115.dtsi
1732 "gmu",
1740 qcom,gmu = <&gmu_wrapper>;
1806 gmu_wrapper: gmu@596a000 {
1807 compatible = "qcom,adreno-gmu-wrapper";
1809 reg-names = "gmu";
monaco.dtsi
4316 qcom,gmu = <&gmu>;
4364 gmu: gmu@3d6a000 { label
4365 compatible = "qcom,adreno-gmu-623.0", "qcom,adreno-gmu";
4369 reg-names = "gmu", "rscc", "gmu_pdc";
4372 interrupt-names = "hfi", "gmu";
4379 clock-names = "gmu",
sdm845.dtsi
4897 qcom,gmu = <&gmu>;
4978 gmu: gmu@506a000 { label
4979 compatible = "qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
4984 reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
4988 interrupt-names = "hfi", "gmu";
4994 clock-names = "gmu", "cxo", "axi", "memnoc";
sm8250-xiaomi-pipa.dts
409 &gmu {
sm8250.dtsi
2939 qcom,gmu = <&gmu>;
2998 gmu: gmu@3d6a000 { label
2999 compatible = "qcom,adreno-gmu-650.2", "qcom,adreno-gmu";
3005 reg-names = "gmu", "rscc", "gmu_pdc", "gmu_pdc_seq";
3009 interrupt-names = "hfi", "gmu";
3016 clock-names = "ahb", "gmu", "cxo", "axi", "memnoc";
sm8550.dtsi
2485 qcom,gmu = <&gmu>;
2552 gmu: gmu@3d6a000 { label
2553 compatible = "qcom,adreno-gmu-740.1", "qcom,adreno-gmu";
2557 reg-names = "gmu", "rscc", "gmu_pdc";
2561 interrupt-names = "hfi", "gmu";
2571 "gmu",
sc8280xp.dtsi
3367 qcom,gmu = <&gmu>;
3431 gmu: gmu@3d6a000 { label
3432 compatible = "qcom,adreno-gmu-690.0", "qcom,adreno-gmu";
3436 reg-names = "gmu", "rscc", "gmu_pdc";
3439 interrupt-names = "hfi", "gmu";
3447 clock-names = "gmu",
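
Every GPU node in the dtsi hits above points at its GMU (or gmu_wrapper) node through a qcom,gmu phandle. A hedged sketch of how a driver can resolve that phandle; find_gmu_node() is an invented helper name, while of_parse_phandle() is the standard kernel API for this kind of lookup:

#include <linux/of.h>
#include <linux/platform_device.h>

static struct device_node *find_gmu_node(struct platform_device *pdev)
{
	/* "qcom,gmu" in the GPU node references the gmu@... (or GMU wrapper) node. */
	struct device_node *node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);

	if (!node)
		return NULL;	/* no GMU phandle in this GPU node */

	/* The caller owns the reference and must release it with of_node_put(). */
	return node;
}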
