/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include "a4xx_gpu.h"
#ifdef CONFIG_MSM_OCMEM
#  include <soc/qcom/ocmem.h>
#endif

#define A4XX_INT0_MASK \
	(A4XX_INT0_RBBM_AHB_ERROR |        \
	 A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A4XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A4XX_INT0_CP_OPCODE_ERROR |       \
	 A4XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A4XX_INT0_CP_HW_FAULT |           \
	 A4XX_INT0_CP_IB1_INT |            \
	 A4XX_INT0_CP_IB2_INT |            \
	 A4XX_INT0_CP_RB_INT |             \
	 A4XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A4XX_INT0_CP_AHB_ERROR_HALT |     \
	 A4XX_INT0_CACHE_FLUSH_TS |        \
	 A4XX_INT0_UCHE_OOB_ACCESS)

extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
static bool a4xx_idle(struct msm_gpu *gpu);

/*
 * a4xx_enable_hwcg() - Program the clock control registers
 * @gpu: The msm GPU to program
 */
static void a4xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);

	/* Disable L1 clocking in A420 due to CCU issues with it */
	for (i = 0; i < 4; i++) {
		if (adreno_is_a420(adreno_gpu)) {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
					0x00002020);
		} else {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
					0x00022020);
		}
	}

	for (i = 0; i < 4; i++) {
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
				0x00000922);
	}

	for (i = 0; i < 4; i++) {
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
				0x00000000);
	}

	for (i = 0; i < 4; i++) {
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
				0x00000001);
	}

	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
	/*
	 * Early a430s have a timing issue with SP/TP power collapse;
	 * disabling HW clock gating prevents it.
	 */
	if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
	else
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}

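/*
 * a4xx_me_init() - Initialize the CP microengine
 *
 * Emits a CP_ME_INIT packet on the first ringbuffer and waits for the
 * GPU to go idle, so the microengine is ready before the first submit.
 */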
static bool a4xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu, ring);
	return a4xx_idle(gpu);
}

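/*
 * a4xx_hw_init() - One-time hardware setup for the a4xx family
 *
 * Programs the VBIF/QoS settings for the specific chip, enables error
 * reporting and hang detection, sets up clock gating and the CP
 * protected-register ranges, loads the PM4/PFP microcode and finally
 * starts the microengine.
 */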
static int a4xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	if (adreno_is_a420(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else if (adreno_is_a430(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection */
	gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	if (adreno_is_a430(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
	}

	/* Enable the RBBM error reporting bits */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Enable power counters */
	gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);

	/*
	 * Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
			(1 << 30) | 0xFFFF);

	gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a4xx_gpu->ocmem_base >> 14));

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);

	/* use the first CP counter for timestamp queries.. userspace may set
	 * this as well but it selects the same counter/countable:
	 */
	gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);

	if (adreno_is_a430(adreno_gpu))
		gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);

	/* Disable L2 bypass to avoid UCHE out of bounds errors */
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);

	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));

	/* On A430 enable SP regfile sleep for power savings */
	/* TODO downstream does this for !420, so maybe applies for 405 too? */
	if (!adreno_is_a420(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
			0x00000441);
		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
			0x00000441);
	}

	a4xx_enable_hwcg(gpu);

	/*
	 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
	 * due to timing issue with HLSQ_TP_CLK_EN
	 */
	if (adreno_is_a420(adreno_gpu)) {
		unsigned int val;
		val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
		val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
		val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
	}

	/* setup access protection: */
	gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);

	/* CP registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);

	/* RB registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);

	/* HLSQ registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);

	/* VPC registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);

	/* SMMU registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);

	gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
	DBG("loading PM4 ucode version: %u", ptr[0]);
	gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
	DBG("loading PFP ucode version: %u", ptr[0]);

	gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);

	return a4xx_me_init(gpu) ? 0 : -EINVAL;
}

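/*
 * a4xx_recover() - Reset the GPU after a hang
 *
 * Dumps the CP scratch registers (and, if hang_debug is set, the full
 * register set), pulses RBBM_SW_RESET_CMD and then defers to the common
 * adreno recovery path to restart the GPU.
 */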
static void a4xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a4xx_dump(gpu);

	gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a4xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

#ifdef CONFIG_MSM_OCMEM
	if (a4xx_gpu->ocmem_base)
		ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
#endif

	kfree(a4xx_gpu);
}

static bool a4xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	if (!adreno_idle(gpu, gpu->rb[0]))
		return false;

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
					A4XX_RBBM_STATUS_GPU_BUSY))) {
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
		/* TODO maybe we need to reset GPU here to recover from hang? */
		return false;
	}

	return true;
}

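/*
 * a4xx_irq() - Top-half GPU interrupt handler
 *
 * Decodes protected-mode register faults for logging, acknowledges the
 * pending RBBM interrupt status and kicks retirement of completed
 * submits.
 */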
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
	DBG("%s: Int status %08x", gpu->name, status);

	if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
		uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
		printk("CP | Protected mode error | %s | addr=%x\n",
			reg & (1 << 24) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2);
	}

	gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

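/*
 * Register ranges captured for register dumps and GPU state snapshots.
 * Each pair of values is an inclusive start/end register offset; the
 * list is terminated by the ~0 sentinel.
 */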
static const unsigned int a4xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
	0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
	0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
	/* CP */
	0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
	0x0578, 0x058F,
	/* VSC */
	0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
	/* GRAS */
	0x0C80, 0x0C81, 0x0C88, 0x0C8F,
	/* RB */
	0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
	/* PC */
	0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	/* VFD */
	0x0E40, 0x0E4A,
	/* VPC */
	0x0E60, 0x0E61, 0x0E63, 0x0E68,
	/* UCHE */
	0x0E80, 0x0E84, 0x0E88, 0x0E95,
	/* VMIDMT */
	0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
	0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
	0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
	0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
	0x1380, 0x1380,
	/* GRAS CTX 0 */
	0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
	/* PC CTX 0 */
	0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
	/* VFD CTX 0 */
	0x2200, 0x2204, 0x2208, 0x22A9,
	/* GRAS CTX 1 */
	0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
	/* PC CTX 1 */
	0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
	/* VFD CTX 1 */
	0x2600, 0x2604, 0x2608, 0x26A9,
	/* XPU */
	0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
	0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
	0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
	0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
	0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
	0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
	0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
	0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
	0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
	0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
	0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
	0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
	0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
	0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
	0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
	0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
	0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
	0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
	0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
	0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
	0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
	0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
	0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
	0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
	0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
	0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
	0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
	0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
	0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
	~0 /* sentinel */
};

static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = adreno_gpu_state_get(gpu);

	if (IS_ERR(state))
		return state;

	state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);

	return state;
}

/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
};

static void a4xx_dump(struct msm_gpu *gpu)
{
	printk("status:   %08x\n",
			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
	adreno_dump(gpu);
}

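/*
 * a4xx_pm_resume() - Power up the GPU
 *
 * After the common resume path, a430 also needs its SP/TP power rail
 * brought out of software collapse; poll until the rail reports it is
 * back on before touching the hardware further.
 */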
static int a4xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	if (adreno_is_a430(adreno_gpu)) {
		unsigned int reg;
		/* Set the default register values; set SW_COLLAPSE to 0 */
		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
		do {
			udelay(5);
			reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
		} while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
	}
	return 0;
}

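/*
 * a4xx_pm_suspend() - Power down the GPU
 *
 * On a430, also assert SW_COLLAPSE so the SP/TP power rail is collapsed
 * while the GPU is suspended.
 */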
static int a4xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	ret = msm_gpu_pm_suspend(gpu);
	if (ret)
		return ret;

	if (adreno_is_a430(adreno_gpu)) {
		/* Set the default register values; set SW_COLLAPSE to 1 */
		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
	}
	return 0;
}

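/*
 * a4xx_get_timestamp() - Read the GPU timestamp
 *
 * Returns the 64-bit value of the first CP performance counter, which
 * a4xx_hw_init() programs as an always-on counter for timestamp queries.
 */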
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
		REG_A4XX_RBBM_PERFCTR_CP_0_HI);

	return 0;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a4xx_hw_init,
		.pm_suspend = a4xx_pm_suspend,
		.pm_resume = a4xx_pm_resume,
		.recover = a4xx_recover,
		.submit = adreno_submit,
		.flush = adreno_flush,
		.active_ring = adreno_active_ring,
		.irq = a4xx_irq,
		.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = adreno_show,
#endif
		.gpu_state_get = a4xx_gpu_state_get,
		.gpu_state_put = adreno_gpu_state_put,
	},
	.get_timestamp = a4xx_get_timestamp,
};

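/*
 * a4xx_gpu_init() - Construct and register an a4xx GPU instance
 *
 * Allocates the a4xx_gpu wrapper, hooks it up to the common adreno/msm
 * GPU layer, claims OCMEM for GMEM when configured, and refuses to run
 * without an IOMMU since there is no command stream validator.
 */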
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
	struct a4xx_gpu *a4xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "no a4xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
	if (!a4xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a4xx_gpu->base;
	gpu = &adreno_gpu->base;

	gpu->perfcntrs = NULL;
	gpu->num_perfcntrs = 0;

	adreno_gpu->registers = a4xx_registers;
	adreno_gpu->reg_offsets = a4xx_register_offsets;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret)
		goto fail;

	/* if needed, allocate gmem: */
	if (adreno_is_a4xx(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
		/* TODO this is different/missing upstream: */
		struct ocmem_buf *ocmem_hdl =
				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);

		a4xx_gpu->ocmem_hdl = ocmem_hdl;
		a4xx_gpu->ocmem_base = ocmem_hdl->addr;
		adreno_gpu->gmem = ocmem_hdl->len;
		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
				a4xx_gpu->ocmem_base);
#endif
	}

	if (!gpu->aspace) {
		/* TODO we think it is possible to configure the GPU to
		 * restrict access to VRAM carveout.  But the required
		 * registers are unknown.  For now just bail out and
		 * limp along with just modesetting.  If it turns out
		 * to not be possible to restrict access, then we must
		 * implement a cmdstream validator.
		 */
		dev_err(dev->dev, "No memory protection without IOMMU\n");
		ret = -ENXIO;
		goto fail;
	}

	return gpu;

fail:
	if (a4xx_gpu)
		a4xx_destroy(&a4xx_gpu->base.base);

	return ERR_PTR(ret);
}