xref: /linux/drivers/gpu/drm/msm/adreno/a4xx_gpu.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include "a4xx_gpu.h"
#ifdef CONFIG_MSM_OCMEM
#  include <soc/qcom/ocmem.h>
#endif

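/*
 * RBBM interrupt sources left unmasked: error conditions (AHB/ATB bus
 * errors, CP faults and protect violations) plus the CP IB1/IB2/RB
 * interrupts that drive fence retirement in a4xx_irq() below.
 */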
#define A4XX_INT0_MASK \
	(A4XX_INT0_RBBM_AHB_ERROR |        \
	 A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A4XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A4XX_INT0_CP_OPCODE_ERROR |       \
	 A4XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A4XX_INT0_CP_HW_FAULT |           \
	 A4XX_INT0_CP_IB1_INT |            \
	 A4XX_INT0_CP_IB2_INT |            \
	 A4XX_INT0_CP_RB_INT |             \
	 A4XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A4XX_INT0_CP_AHB_ERROR_HALT |     \
	 A4XX_INT0_UCHE_OOB_ACCESS)

extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);

/*
 * a4xx_enable_hwcg() - Program the clock control registers
 * @gpu: The adreno GPU to program
 */
static void a4xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);

	/* Disable L1 clocking in A420 due to CCU issues with it */
	for (i = 0; i < 4; i++) {
		if (adreno_is_a420(adreno_gpu)) {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
					0x00002020);
		} else {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
					0x00022020);
		}
	}

	for (i = 0; i < 4; i++) {
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
				0x00000922);
	}

	for (i = 0; i < 4; i++) {
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
				0x00000000);
	}

	for (i = 0; i < 4; i++) {
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
				0x00000001);
	}

	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}

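/*
 * a4xx_me_init() submits the CP_ME_INIT packet, the first packet the
 * micro engine expects once it is released from halt (see the
 * CP_ME_CNTL write at the end of a4xx_hw_init()), then waits for the
 * ring to drain so the GPU is in a known state.
 */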
static void a4xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);
	gpu->funcs->idle(gpu);
}

static int a4xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	if (adreno_is_a4xx(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection */
	gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Enable power counters */
	gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);

	/*
	 * Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
			(1 << 30) | 0xFFFF);

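	/*
	 * Set the GMEM aperture base; the register apparently takes the
	 * address in 16KB units, hence the >> 14.  ocmem_base stays zero
	 * unless OCMEM was allocated in a4xx_gpu_init().
	 */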
	gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a4xx_gpu->ocmem_base >> 14));

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);

	/* Disable L2 bypass to avoid UCHE out of bounds errors */
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);

	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));

	a4xx_enable_hwcg(gpu);

	/*
	 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
	 * due to timing issue with HLSQ_TP_CLK_EN
	 */
	if (adreno_is_a420(adreno_gpu)) {
		unsigned int val;

		val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
		val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
		val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
	}

	/* setup access protection: */
	gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);

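	/*
	 * Each CP_PROTECT entry below appears to pack (1 << 30) | (1 << 29),
	 * log2 of the region size (in dwords) in bits 24-28, and the region
	 * base as a byte offset (dword address << 2) in the low bits.  As a
	 * worked example, 0x64001600 protects 2^4 = 16 dwords starting at
	 * 0x1600 >> 2 = 0x580, i.e. the CP_PROTECT registers themselves.
	 * (Field layout inferred from the values here, not from docs.)
	 */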
	/* RBBM registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);

	/* CP registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);

	/* RB registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);

	/* HLSQ registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);

	/* VPC registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);

	/* SMMU registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);

	gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

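	/*
	 * Word 0 of each firmware image holds version metadata (logged via
	 * DBG below), so the copies into the ucode RAMs start at word 1.
	 */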
	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->pm4->data);
	len = adreno_gpu->pm4->size / 4;
	DBG("loading PM4 ucode version: %u", ptr[0]);
	gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->pfp->data);
	len = adreno_gpu->pfp->size / 4;
	DBG("loading PFP ucode version: %u", ptr[0]);

	gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);

	a4xx_me_init(gpu);
	return 0;
}

static void a4xx_recover(struct msm_gpu *gpu)
{
	adreno_dump_info(gpu);

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a4xx_dump(gpu);

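	/* assert SW reset; the read back ensures the posted write has
	 * landed before the reset is de-asserted:
	 */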
	gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a4xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

#ifdef CONFIG_MSM_OCMEM
	if (a4xx_gpu->ocmem_base)
		ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
#endif

	kfree(a4xx_gpu);
}

static void a4xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	adreno_idle(gpu);

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
					A4XX_RBBM_STATUS_GPU_BUSY)))
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

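/*
 * Single handler for all unmasked interrupt sources: ack whatever
 * fired, then kick retire processing; error details can still be read
 * back from the RBBM/CP status registers by the recovery path.
 */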
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
	DBG("%s: Int status %08x", gpu->name, status);

	gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

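/*
 * Registers dumped on hang and exposed via debugfs: each pair is an
 * inclusive {start, end} dword-address range, terminated by the ~0
 * sentinel.
 */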
static const unsigned int a4xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
	0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
	0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
	/* CP */
	0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
	0x0578, 0x058F,
	/* VSC */
	0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
	/* GRAS */
	0x0C80, 0x0C81, 0x0C88, 0x0C8F,
	/* RB */
	0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
	/* PC */
	0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	/* VFD */
	0x0E40, 0x0E4A,
	/* VPC */
	0x0E60, 0x0E61, 0x0E63, 0x0E68,
	/* UCHE */
	0x0E80, 0x0E84, 0x0E88, 0x0E95,
	/* VMIDMT */
	0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
	0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
	0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
	0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
	0x1380, 0x1380,
	/* GRAS CTX 0 */
	0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
	/* PC CTX 0 */
	0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
	/* VFD CTX 0 */
	0x2200, 0x2204, 0x2208, 0x22A9,
	/* GRAS CTX 1 */
	0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
	/* PC CTX 1 */
	0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
	/* VFD CTX 1 */
	0x2600, 0x2604, 0x2608, 0x26A9,
	/* XPU */
	0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
	0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
	0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
	0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
	0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
	0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
	0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
	0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
	0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
	0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
	0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
	0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
	0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
	0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
	0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
	0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
	0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
	0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
	0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
	0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
	0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
	0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
	0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
	0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
	0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
	0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
	0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
	0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
	0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
	~0 /* sentinel */
};

#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	gpu->funcs->pm_resume(gpu);

	seq_printf(m, "status:   %08x\n",
			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
	gpu->funcs->pm_suspend(gpu);

	adreno_show(gpu, m);
}
#endif

/* Register offset defines for A4XX, in order of enum adreno_regs */
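/*
 * Generic adreno code indexes this table with enum adreno_regs values
 * so it can reach generation-specific registers without per-GPU
 * #ifdefs; note that CP_TIMESTAMP maps onto the legacy AXXX scratch
 * register.
 */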
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
			REG_A4XX_CP_PFP_UCODE_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
			REG_A4XX_CP_PFP_UCODE_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
			REG_A4XX_CP_PROTECT_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
			REG_A4XX_RBBM_PERFCTR_CTL),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
			REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
			REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
			REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
			REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
			REG_A4XX_RBBM_INT_0_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
			REG_A4XX_RBBM_AHB_ERROR_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
			REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
			REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
			REG_A4XX_VPC_DEBUG_RAM_SEL),
	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
			REG_A4XX_VPC_DEBUG_RAM_READ),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
			REG_A4XX_RBBM_INT_CLEAR_CMD),
	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
			REG_A4XX_VSC_SIZE_ADDRESS),
	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
			REG_A4XX_SP_VS_PVT_MEM_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
			REG_A4XX_SP_FS_PVT_MEM_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
			REG_A4XX_SP_VS_OBJ_START),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
			REG_A4XX_SP_FS_OBJ_START),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
			REG_A4XX_RBBM_SW_RESET_CMD),
	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
			REG_A4XX_UCHE_INVALIDATE0),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
			REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
			REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};

static void a4xx_dump(struct msm_gpu *gpu)
{
	printk("status:   %08x\n",
			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
	adreno_dump(gpu);
}

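/*
 * a4xx overrides hw_init, recover, idle, irq, destroy and (with
 * debugfs) show; everything else falls through to the generic
 * adreno/msm implementations.
 */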
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a4xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = a4xx_recover,
		.last_fence = adreno_last_fence,
		.submit = adreno_submit,
		.flush = adreno_flush,
		.idle = a4xx_idle,
		.irq = a4xx_irq,
		.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a4xx_show,
#endif
	},
};

struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
	struct a4xx_gpu *a4xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "no a4xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
	if (!a4xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a4xx_gpu->base;
	gpu = &adreno_gpu->base;

	a4xx_gpu->pdev = pdev;

	gpu->perfcntrs = NULL;
	gpu->num_perfcntrs = 0;

	adreno_gpu->registers = a4xx_registers;
	adreno_gpu->reg_offsets = a4xx_register_offsets;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
	if (ret)
		goto fail;

	/* if needed, allocate gmem: */
	if (adreno_is_a4xx(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
		/* TODO this is different/missing upstream: */
		struct ocmem_buf *ocmem_hdl =
				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);

		a4xx_gpu->ocmem_hdl = ocmem_hdl;
		a4xx_gpu->ocmem_base = ocmem_hdl->addr;
		adreno_gpu->gmem = ocmem_hdl->len;
		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
				a4xx_gpu->ocmem_base);
#endif
	}

	if (!gpu->mmu) {
		/* TODO we think it is possible to configure the GPU to
		 * restrict access to VRAM carveout.  But the required
		 * registers are unknown.  For now just bail out and
		 * limp along with just modesetting.  If it turns out
		 * to not be possible to restrict access, then we must
		 * implement a cmdstream validator.
		 */
		dev_err(dev->dev, "No memory protection without IOMMU\n");
		ret = -ENXIO;
		goto fail;
	}

	return gpu;

fail:
	if (a4xx_gpu)
		a4xx_destroy(&a4xx_gpu->base.base);

	return ERR_PTR(ret);
}