xref: /linux/drivers/gpu/drm/msm/adreno/a6xx_gpu.c (revision 3d0fe49454652117522f60bfbefb978ba0e5300b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
3 
4 
5 #include "msm_gem.h"
6 #include "msm_mmu.h"
7 #include "msm_gpu_trace.h"
8 #include "a6xx_gpu.h"
9 #include "a6xx_gmu.xml.h"
10 
11 #include <linux/bitfield.h>
12 #include <linux/devfreq.h>
13 #include <linux/pm_domain.h>
14 #include <linux/soc/qcom/llcc-qcom.h>
15 
16 #define GPU_PAS_ID 13
17 
18 static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
19 {
20 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
21 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
22 
23 	/* Check that the GMU is idle */
24 	if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
25 		return false;
26 
27 	/* Check that the CX master is idle */
28 	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
29 			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
30 		return false;
31 
32 	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
33 		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
34 }
35 
36 static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
37 {
38 	/* wait for CP to drain ringbuffer: */
39 	if (!adreno_idle(gpu, ring))
40 		return false;
41 
42 	if (spin_until(_a6xx_check_idle(gpu))) {
43 		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
44 			gpu->name, __builtin_return_address(0),
45 			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
46 			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
47 			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
48 			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
49 		return false;
50 	}
51 
52 	return true;
53 }
54 
55 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
56 {
57 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
58 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
59 
60 	/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
61 	if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
62 		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
63 		OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
64 		OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
65 	}
66 }
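
/*
 * A note on the mechanism: CP_WHERE_AM_I asks the CP to write its current
 * read position to the per-ring shadow address packed into the packet
 * above, letting the kernel poll the rptr from memory rather than from a
 * register that may not be accessible while the GPU is power-collapsed.
 * Targets with expanded APRIV appear to maintain the shadow in hardware,
 * hence the hw_apriv check.
 */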
67 
68 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
69 {
70 	uint32_t wptr;
71 	unsigned long flags;
72 
73 	update_shadow_rptr(gpu, ring);
74 
75 	spin_lock_irqsave(&ring->preempt_lock, flags);
76 
77 	/* Copy the shadow to the actual register */
78 	ring->cur = ring->next;
79 
80 	/* Make sure to wrap wptr if we need to */
81 	wptr = get_wptr(ring);
82 
83 	spin_unlock_irqrestore(&ring->preempt_lock, flags);
84 
85 	/* Make sure everything is posted before making a decision */
86 	mb();
87 
88 	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
89 }
90 
91 static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
92 		u64 iova)
93 {
94 	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
95 	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
96 		CP_REG_TO_MEM_0_CNT(2) |
97 		CP_REG_TO_MEM_0_64B);
98 	OUT_RING(ring, lower_32_bits(iova));
99 	OUT_RING(ring, upper_32_bits(iova));
100 }
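
/*
 * CP_REG_TO_MEM with CNT(2) plus the 64B flag makes the CP copy two
 * consecutive 32-bit counter registers into one 64-bit value at the given
 * IOVA; this is how the per-submit stats snapshots below are taken without
 * any CPU involvement.
 */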
101 
102 static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
103 		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
104 {
105 	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
106 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
107 	phys_addr_t ttbr;
108 	u32 asid;
109 	u64 memptr = rbmemptr(ring, ttbr0);
110 
111 	if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
112 		return;
113 
114 	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
115 		return;
116 
117 	if (!sysprof) {
118 		if (!adreno_is_a7xx(adreno_gpu)) {
119 			/* Turn off protected mode to write to special registers */
120 			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
121 			OUT_RING(ring, 0);
122 		}
123 
124 		OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
125 		OUT_RING(ring, 1);
126 	}
127 
128 	/* Execute the table update */
129 	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
130 	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
131 
132 	OUT_RING(ring,
133 		CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
134 		CP_SMMU_TABLE_UPDATE_1_ASID(asid));
135 	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
136 	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
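
	/*
	 * Note that the CP performs the switch itself here, reprogramming
	 * TTBR0 and the ASID on SMMU context bank 0; no CPU-side SMMU calls
	 * are involved on this path.
	 */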
137 
138 	/*
139 	 * Write the new TTBR0 to the memstore. This is good for debugging.
140 	 */
141 	OUT_PKT7(ring, CP_MEM_WRITE, 4);
142 	OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
143 	OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
144 	OUT_RING(ring, lower_32_bits(ttbr));
145 	OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
146 
147 	/*
148 	 * Sync both threads after switching pagetables and enable BR only
149 	 * to make sure BV doesn't race ahead while BR is still switching
150 	 * pagetables.
151 	 */
152 	if (adreno_is_a7xx(&a6xx_gpu->base)) {
153 		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
154 		OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
155 	}
156 
157 	/*
158 	 * And finally, trigger a uche flush to be sure there isn't anything
159 	 * lingering in that part of the GPU
160 	 */
161 
162 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
163 	OUT_RING(ring, CACHE_INVALIDATE);
164 
165 	if (!sysprof) {
166 		/*
167 		 * Wait for SRAM clear after the pgtable update, so the
168 		 * two can happen in parallel:
169 		 */
170 		OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
171 		OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
172 		OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
173 				REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
174 		OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
175 		OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
176 		OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
177 		OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
178 
179 		if (!adreno_is_a7xx(adreno_gpu)) {
180 			/* Re-enable protected mode: */
181 			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
182 			OUT_RING(ring, 1);
183 		}
184 	}
185 }
186 
187 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
188 {
189 	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
190 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
191 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
192 	struct msm_ringbuffer *ring = submit->ring;
193 	unsigned int i, ibs = 0;
194 
195 	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
196 
197 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
198 		rbmemptr_stats(ring, index, cpcycles_start));
199 
200 	/*
201 	 * For PM4 the GMU register offsets are calculated from the base of the
202 	 * GPU registers so we need to add 0x1a800 to the register value on A630
203 	 * to get the right value from PM4.
204 	 */
205 	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
206 		rbmemptr_stats(ring, index, alwayson_start));
207 
208 	/* Invalidate CCU depth and color */
209 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
210 	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
211 
212 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
213 	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
214 
215 	/* Submit the commands */
216 	for (i = 0; i < submit->nr_cmds; i++) {
217 		switch (submit->cmd[i].type) {
218 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
219 			break;
220 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
221 			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
222 				break;
223 			fallthrough;
224 		case MSM_SUBMIT_CMD_BUF:
225 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
226 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
227 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
228 			OUT_RING(ring, submit->cmd[i].size);
229 			ibs++;
230 			break;
231 		}
232 
233 		/*
234 		 * Periodically update shadow-wptr if needed, so that we
235 		 * can see partial progress of submits with large # of
236 		 * cmds.. otherwise we could needlessly stall waiting for
237 		 * ringbuffer state, simply due to looking at a shadow
238 		 * rptr value that has not been updated
239 		 */
240 		if ((ibs % 32) == 0)
241 			update_shadow_rptr(gpu, ring);
242 	}
243 
244 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
245 		rbmemptr_stats(ring, index, cpcycles_end));
246 	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
247 		rbmemptr_stats(ring, index, alwayson_end));
248 
249 	/* Write the fence to the scratch register */
250 	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
251 	OUT_RING(ring, submit->seqno);
252 
253 	/*
254 	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
255 	 * timestamp is written to the memory and then triggers the interrupt
256 	 */
257 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
258 	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
259 		CP_EVENT_WRITE_0_IRQ);
260 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
261 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
262 	OUT_RING(ring, submit->seqno);
263 
264 	trace_msm_gpu_submit_flush(submit,
265 		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
266 
267 	a6xx_flush(gpu, ring);
268 }
269 
270 static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
271 {
272 	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
273 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
274 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
275 	struct msm_ringbuffer *ring = submit->ring;
276 	unsigned int i, ibs = 0;
277 
278 	/*
279 	 * Toggle concurrent binning for pagetable switch and set the thread to
280 	 * BR since only it can execute the pagetable switch packets.
281 	 */
282 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
283 	OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
284 
285 	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
286 
287 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
288 		rbmemptr_stats(ring, index, cpcycles_start));
289 	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
290 		rbmemptr_stats(ring, index, alwayson_start));
291 
292 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
293 	OUT_RING(ring, CP_SET_THREAD_BOTH);
294 
295 	OUT_PKT7(ring, CP_SET_MARKER, 1);
296 	OUT_RING(ring, 0x101); /* IFPC disable */
297 
298 	OUT_PKT7(ring, CP_SET_MARKER, 1);
299 	OUT_RING(ring, 0x00d); /* IB1LIST start */
300 
301 	/* Submit the commands */
302 	for (i = 0; i < submit->nr_cmds; i++) {
303 		switch (submit->cmd[i].type) {
304 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
305 			break;
306 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
307 			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
308 				break;
309 			fallthrough;
310 		case MSM_SUBMIT_CMD_BUF:
311 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
312 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
313 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
314 			OUT_RING(ring, submit->cmd[i].size);
315 			ibs++;
316 			break;
317 		}
318 
319 		/*
320 		 * Periodically update shadow-wptr if needed, so that we
321 		 * can see partial progress of submits with large # of
322 		 * cmds.. otherwise we could needlessly stall waiting for
323 		 * ringbuffer state, simply due to looking at a shadow
324 		 * rptr value that has not been updated
325 		 */
326 		if ((ibs % 32) == 0)
327 			update_shadow_rptr(gpu, ring);
328 	}
329 
330 	OUT_PKT7(ring, CP_SET_MARKER, 1);
331 	OUT_RING(ring, 0x00e); /* IB1LIST end */
332 
333 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
334 		rbmemptr_stats(ring, index, cpcycles_end));
335 	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
336 		rbmemptr_stats(ring, index, alwayson_end));
337 
338 	/* Write the fence to the scratch register */
339 	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
340 	OUT_RING(ring, submit->seqno);
341 
342 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
343 	OUT_RING(ring, CP_SET_THREAD_BR);
344 
345 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
346 	OUT_RING(ring, CCU_INVALIDATE_DEPTH);
347 
348 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
349 	OUT_RING(ring, CCU_INVALIDATE_COLOR);
350 
351 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
352 	OUT_RING(ring, CP_SET_THREAD_BV);
353 
354 	/*
355 	 * Make sure the timestamp is committed once BV pipe is
356 	 * completely done with this submission.
357 	 */
358 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
359 	OUT_RING(ring, CACHE_CLEAN | BIT(27));
360 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
361 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
362 	OUT_RING(ring, submit->seqno);
363 
364 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
365 	OUT_RING(ring, CP_SET_THREAD_BR);
366 
367 	/*
368 	 * This makes sure that BR doesn't race ahead and commit
369 	 * timestamp to memstore while BV is still processing
370 	 * this submission.
371 	 */
372 	OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
373 	OUT_RING(ring, 0);
374 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
375 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
376 	OUT_RING(ring, submit->seqno);
377 
378 	/* write the ringbuffer timestamp */
379 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
380 	OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
381 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
382 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
383 	OUT_RING(ring, submit->seqno);
384 
385 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
386 	OUT_RING(ring, CP_SET_THREAD_BOTH);
387 
388 	OUT_PKT7(ring, CP_SET_MARKER, 1);
389 	OUT_RING(ring, 0x100); /* IFPC enable */
390 
391 	trace_msm_gpu_submit_flush(submit,
392 		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
393 
394 	a6xx_flush(gpu, ring);
395 }
396 
397 const struct adreno_reglist a612_hwcg[] = {
398 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
399 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
400 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
401 	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
402 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
403 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
404 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
405 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
406 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
407 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
408 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
409 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
410 	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
411 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
412 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
413 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
414 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
415 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
416 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
417 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
418 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
419 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
420 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
421 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
422 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
423 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
424 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
425 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
426 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
427 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
428 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
429 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
430 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
431 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
432 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
433 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
434 	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
435 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
436 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
437 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
438 	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
439 	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
440 	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
441 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
442 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
443 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
444 	{},
445 };
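
/*
 * Each of these hwcg tables is terminated by an empty sentinel entry; the
 * programming loop in a6xx_set_hwcg() stops at the first entry with a zero
 * offset.
 */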
446 
447 /* For a615 family (a615, a616, a618 and a619) */
448 const struct adreno_reglist a615_hwcg[] = {
449 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0,  0x02222222},
450 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
451 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
452 	{REG_A6XX_RBBM_CLOCK_HYST_SP0,  0x0000F3CF},
453 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0,  0x02222222},
454 	{REG_A6XX_RBBM_CLOCK_CNTL_TP1,  0x02222222},
455 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
456 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
457 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
458 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
459 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
460 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
461 	{REG_A6XX_RBBM_CLOCK_HYST_TP0,  0x77777777},
462 	{REG_A6XX_RBBM_CLOCK_HYST_TP1,  0x77777777},
463 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
464 	{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
465 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
466 	{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
467 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
468 	{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
469 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
470 	{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
471 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
472 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
473 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
474 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
475 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
476 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
477 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE,  0x22222222},
478 	{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
479 	{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
480 	{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
481 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE,  0x00000004},
482 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
483 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
484 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
485 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002020},
486 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
487 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
488 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
489 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
490 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
491 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
492 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
493 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
494 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
495 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
496 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
497 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
498 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
499 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
500 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
501 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
502 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
503 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
504 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
505 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
506 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
507 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
508 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
509 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
510 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
511 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
512 	{},
513 };
514 
515 const struct adreno_reglist a630_hwcg[] = {
516 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
517 	{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
518 	{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
519 	{REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
520 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
521 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
522 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
523 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
524 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
525 	{REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
526 	{REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
527 	{REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
528 	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
529 	{REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
530 	{REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
531 	{REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
532 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
533 	{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
534 	{REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
535 	{REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
536 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
537 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
538 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
539 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
540 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
541 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
542 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
543 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
544 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
545 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
546 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
547 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
548 	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
549 	{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
550 	{REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
551 	{REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
552 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
553 	{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
554 	{REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
555 	{REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
556 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
557 	{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
558 	{REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
559 	{REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
560 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
561 	{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
562 	{REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
563 	{REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
564 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
565 	{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
566 	{REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
567 	{REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
568 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
569 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
570 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
571 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
572 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
573 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
574 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
575 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
576 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
577 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
578 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
579 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
580 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
581 	{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
582 	{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
583 	{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
584 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
585 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
586 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
587 	{REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
588 	{REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
589 	{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
590 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
591 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
592 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
593 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
594 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
595 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
596 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
597 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
598 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
599 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
600 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
601 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
602 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
603 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
604 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
605 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
606 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
607 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
608 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
609 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
610 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
611 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
612 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
613 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
614 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
615 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
616 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
617 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
618 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
619 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
620 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
621 	{},
622 };
623 
624 const struct adreno_reglist a640_hwcg[] = {
625 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
626 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
627 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
628 	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
629 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
630 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
631 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
632 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
633 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
634 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
635 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
636 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
637 	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
638 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
639 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
640 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
641 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
642 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
643 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
644 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
645 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
646 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
647 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
648 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
649 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
650 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
651 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
652 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
653 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
654 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
655 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
656 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
657 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
658 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
659 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
660 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
661 	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
662 	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
663 	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
664 	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
665 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
666 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
667 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
668 	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
669 	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
670 	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
671 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
672 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
673 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
674 	{},
675 };
676 
677 const struct adreno_reglist a650_hwcg[] = {
678 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
679 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
680 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
681 	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
682 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
683 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
684 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
685 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
686 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
687 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
688 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
689 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
690 	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
691 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
692 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
693 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
694 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
695 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
696 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
697 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
698 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
699 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
700 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
701 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
702 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
703 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
704 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
705 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
706 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
707 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
708 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
709 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
710 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
711 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
712 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
713 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
714 	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
715 	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
716 	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
717 	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
718 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
719 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
720 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
721 	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
722 	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
723 	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
724 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
725 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
726 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
727 	{},
728 };
729 
730 const struct adreno_reglist a660_hwcg[] = {
731 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
732 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
733 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
734 	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
735 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
736 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
737 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
738 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
739 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
740 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
741 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
742 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
743 	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
744 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
745 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
746 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
747 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
748 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
749 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
750 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
751 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
752 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
753 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
754 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
755 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
756 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
757 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
758 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
759 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
760 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
761 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
762 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
763 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
764 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
765 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
766 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
767 	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
768 	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
769 	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
770 	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
771 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
772 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
773 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
774 	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
775 	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
776 	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
777 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
778 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
779 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
780 	{},
781 };
782 
783 const struct adreno_reglist a690_hwcg[] = {
784 	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
785 	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
786 	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
787 	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
788 	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
789 	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
790 	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
791 	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
792 	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
793 	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
794 	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
795 	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
796 	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
797 	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
798 	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
799 	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
800 	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
801 	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
802 	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
803 	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
804 	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
805 	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
806 	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
807 	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
808 	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
809 	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
810 	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
811 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
812 	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
813 	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
814 	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
815 	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
816 	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
817 	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
818 	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
819 	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
820 	{REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
821 	{REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
822 	{REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
823 	{REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
824 	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
825 	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
826 	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
827 	{REG_A6XX_RBBM_CLOCK_CNTL, 0x8AA8AA82},
828 	{REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
829 	{REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
830 	{REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
831 	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
832 	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
833 	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
834 	{REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
835 	{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
836 	{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
837 	{}
838 };
839 
840 const struct adreno_reglist a730_hwcg[] = {
841 	{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
842 	{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022222 },
843 	{ REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf },
844 	{ REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
845 	{ REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
846 	{ REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
847 	{ REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
848 	{ REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
849 	{ REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
850 	{ REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
851 	{ REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
852 	{ REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
853 	{ REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
854 	{ REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
855 	{ REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
856 	{ REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
857 	{ REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
858 	{ REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004 },
859 	{ REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002 },
860 	{ REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
861 	{ REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
862 	{ REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
863 	{ REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
864 	{ REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
865 	{ REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
866 	{ REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
867 	{ REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
868 	{ REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
869 	{ REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
870 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
871 	{ REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
872 	{ REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222 },
873 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
874 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
875 	{ REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
876 	{ REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
877 	{ REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
878 	{ REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000 },
879 	{ REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
880 	{ REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222 },
881 	{ REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
882 	{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
883 	{ REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
884 	{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002 },
885 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
886 	{ REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000223 },
887 	{ REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
888 	{ REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
889 	{ REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
890 	{ REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
891 	{ REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
892 	{ REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
893 	{ REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
894 	{},
895 };
896 
897 const struct adreno_reglist a740_hwcg[] = {
898 	{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
899 	{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x22022222 },
900 	{ REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x003cf3cf },
901 	{ REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
902 	{ REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
903 	{ REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
904 	{ REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
905 	{ REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
906 	{ REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
907 	{ REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
908 	{ REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
909 	{ REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
910 	{ REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
911 	{ REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
912 	{ REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
913 	{ REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
914 	{ REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
915 	{ REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x00222222 },
916 	{ REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000444 },
917 	{ REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000222 },
918 	{ REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
919 	{ REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
920 	{ REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
921 	{ REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
922 	{ REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
923 	{ REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
924 	{ REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
925 	{ REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
926 	{ REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
927 	{ REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
928 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
929 	{ REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
930 	{ REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00222222 },
931 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
932 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
933 	{ REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
934 	{ REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
935 	{ REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
936 	{ REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000000 },
937 	{ REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
938 	{ REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00000000 },
939 	{ REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
940 	{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
941 	{ REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
942 	{ REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
943 	{ REG_A7XX_RBBM_CLOCK_HYST2_VFD, 0x00000000 },
944 	{ REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000222 },
945 	{ REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
946 	{ REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
947 	{ REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
948 	{ REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
949 	{ REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
950 	{ REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
951 	{ REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
952 	{},
953 };
954 
955 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
956 {
957 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
958 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
959 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
960 	const struct adreno_reglist *reg;
961 	unsigned int i;
962 	u32 val, clock_cntl_on, cgc_mode;
963 
964 	if (!adreno_gpu->info->hwcg)
965 		return;
966 
967 	if (adreno_is_a630(adreno_gpu))
968 		clock_cntl_on = 0x8aa8aa02;
969 	else if (adreno_is_a610(adreno_gpu))
970 		clock_cntl_on = 0xaaa8aa82;
971 	else
972 		clock_cntl_on = 0x8aa8aa82;
973 
974 	if (adreno_is_a7xx(adreno_gpu)) {
975 		cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
976 
977 		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
978 			  state ? cgc_mode : 0);
979 		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
980 			  state ? 0x10111 : 0);
981 		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
982 			  state ? 0x5555 : 0);
983 	}
984 
985 	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
986 
987 	/* Don't re-program the registers if they are already correct */
988 	if ((!state && !val) || (state && (val == clock_cntl_on)))
989 		return;
990 
991 	/* Disable SP clock before programming HWCG registers */
992 	if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
993 		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
994 
995 	for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
996 		gpu_write(gpu, reg->offset, state ? reg->value : 0);
997 
998 	/* Enable SP clock */
999 	if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
1000 		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
1001 
1002 	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
1003 }
1004 
1005 /* For a615, a616, a618, a619, a630, a640 and a680 */
1006 static const u32 a6xx_protect[] = {
1007 	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
1008 	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
1009 	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
1010 	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
1011 	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
1012 	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
1013 	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
1014 	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
1015 	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
1016 	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
1017 	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
1018 	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
1019 	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
1020 	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
1021 	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
1022 	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
1023 	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
1024 	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
1025 	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
1026 	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
1027 	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
1028 	A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
1029 	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
1030 	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
1031 	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
1032 	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
1033 	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
1034 	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
1035 	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
1036 	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
1037 	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
1038 	A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
1039 };
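
/*
 * The A6XX_PROTECT_RDONLY()/A6XX_PROTECT_NORDWR() helpers pack a base
 * register offset and a span length into a single CP_PROTECT dword: RDONLY
 * spans may still be read, while NORDWR spans fault on both reads and
 * writes. A zero-length final entry relies on
 * A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE (set in a6xx_set_cp_protect())
 * to stretch to the end of the register space.
 */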
1040 
1041 /* These are for a620 and a650 */
1042 static const u32 a650_protect[] = {
1043 	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
1044 	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
1045 	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
1046 	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
1047 	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
1048 	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
1049 	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
1050 	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
1051 	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
1052 	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
1053 	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
1054 	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
1055 	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
1056 	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
1057 	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
1058 	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
1059 	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
1060 	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
1061 	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
1062 	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
1063 	A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
1064 	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
1065 	A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
1066 	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
1067 	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
1068 	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
1069 	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
1070 	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
1071 	A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
1072 	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
1073 	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
1074 	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
1075 	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
1076 	A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
1077 	A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
1078 	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
1079 	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
1080 	A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
1081 	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
1082 };
1083 
1084 /* These are for a635 and a660 */
1085 static const u32 a660_protect[] = {
1086 	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
1087 	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
1088 	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
1089 	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
1090 	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
1091 	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
1092 	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
1093 	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
1094 	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
1095 	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
1096 	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
1097 	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
1098 	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
1099 	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
1100 	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
1101 	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
1102 	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
1103 	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
1104 	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
1105 	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
1106 	A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
1107 	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
1108 	A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
1109 	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
1110 	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
1111 	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
1112 	A6XX_PROTECT_NORDWR(0x0ae50, 0x012f),
1113 	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
1114 	A6XX_PROTECT_NORDWR(0x0b608, 0x0006),
1115 	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
1116 	A6XX_PROTECT_NORDWR(0x0be20, 0x015f),
1117 	A6XX_PROTECT_NORDWR(0x0d000, 0x05ff),
1118 	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
1119 	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
1120 	A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
1121 	A6XX_PROTECT_NORDWR(0x1a400, 0x1fff),
1122 	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
1123 	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
1124 	A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
1125 	A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
1126 	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
1127 };
1128 
1129 /* These are for a690 */
1130 static const u32 a690_protect[] = {
1131 	A6XX_PROTECT_RDONLY(0x00000, 0x004ff),
1132 	A6XX_PROTECT_RDONLY(0x00501, 0x00001),
1133 	A6XX_PROTECT_RDONLY(0x0050b, 0x002f4),
1134 	A6XX_PROTECT_NORDWR(0x0050e, 0x00000),
1135 	A6XX_PROTECT_NORDWR(0x00510, 0x00000),
1136 	A6XX_PROTECT_NORDWR(0x00534, 0x00000),
1137 	A6XX_PROTECT_NORDWR(0x00800, 0x00082),
1138 	A6XX_PROTECT_NORDWR(0x008a0, 0x00008),
1139 	A6XX_PROTECT_NORDWR(0x008ab, 0x00024),
1140 	A6XX_PROTECT_RDONLY(0x008de, 0x000ae),
1141 	A6XX_PROTECT_NORDWR(0x00900, 0x0004d),
1142 	A6XX_PROTECT_NORDWR(0x0098d, 0x00272),
1143 	A6XX_PROTECT_NORDWR(0x00e00, 0x00001),
1144 	A6XX_PROTECT_NORDWR(0x00e03, 0x0000c),
1145 	A6XX_PROTECT_NORDWR(0x03c00, 0x000c3),
1146 	A6XX_PROTECT_RDONLY(0x03cc4, 0x01fff),
1147 	A6XX_PROTECT_NORDWR(0x08630, 0x001cf),
1148 	A6XX_PROTECT_NORDWR(0x08e00, 0x00000),
1149 	A6XX_PROTECT_NORDWR(0x08e08, 0x00007),
1150 	A6XX_PROTECT_NORDWR(0x08e50, 0x0001f),
1151 	A6XX_PROTECT_NORDWR(0x08e80, 0x0027f),
1152 	A6XX_PROTECT_NORDWR(0x09624, 0x001db),
1153 	A6XX_PROTECT_NORDWR(0x09e60, 0x00011),
1154 	A6XX_PROTECT_NORDWR(0x09e78, 0x00187),
1155 	A6XX_PROTECT_NORDWR(0x0a630, 0x001cf),
1156 	A6XX_PROTECT_NORDWR(0x0ae02, 0x00000),
1157 	A6XX_PROTECT_NORDWR(0x0ae50, 0x0012f),
1158 	A6XX_PROTECT_NORDWR(0x0b604, 0x00000),
1159 	A6XX_PROTECT_NORDWR(0x0b608, 0x00006),
1160 	A6XX_PROTECT_NORDWR(0x0be02, 0x00001),
1161 	A6XX_PROTECT_NORDWR(0x0be20, 0x0015f),
1162 	A6XX_PROTECT_NORDWR(0x0d000, 0x005ff),
1163 	A6XX_PROTECT_NORDWR(0x0f000, 0x00bff),
1164 	A6XX_PROTECT_RDONLY(0x0fc00, 0x01fff),
1165 	A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /* note: infinite range */
1166 };
1167 
1168 static const u32 a730_protect[] = {
1169 	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
1170 	A6XX_PROTECT_RDONLY(0x0050b, 0x0058),
1171 	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
1172 	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
1173 	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
1174 	A6XX_PROTECT_RDONLY(0x005fb, 0x009d),
1175 	A6XX_PROTECT_NORDWR(0x00699, 0x01e9),
1176 	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
1177 	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
1178 	/* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
1179 	A6XX_PROTECT_RDONLY(0x008de, 0x0154),
1180 	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
1181 	A6XX_PROTECT_NORDWR(0x0098d, 0x00b2),
1182 	A6XX_PROTECT_NORDWR(0x00a41, 0x01be),
1183 	A6XX_PROTECT_NORDWR(0x00df0, 0x0001),
1184 	A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
1185 	A6XX_PROTECT_NORDWR(0x00e07, 0x0008),
1186 	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
1187 	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
1188 	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
1189 	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
1190 	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
1191 	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
1192 	A6XX_PROTECT_NORDWR(0x08e80, 0x0280),
1193 	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
1194 	A6XX_PROTECT_NORDWR(0x09e40, 0x0000),
1195 	A6XX_PROTECT_NORDWR(0x09e64, 0x000d),
1196 	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
1197 	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
1198 	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
1199 	A6XX_PROTECT_NORDWR(0x0ae50, 0x000f),
1200 	A6XX_PROTECT_NORDWR(0x0ae66, 0x0003),
1201 	A6XX_PROTECT_NORDWR(0x0ae6f, 0x0003),
1202 	A6XX_PROTECT_NORDWR(0x0b604, 0x0003),
1203 	A6XX_PROTECT_NORDWR(0x0ec00, 0x0fff),
1204 	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
1205 	A6XX_PROTECT_NORDWR(0x18400, 0x0053),
1206 	A6XX_PROTECT_RDONLY(0x18454, 0x0004),
1207 	A6XX_PROTECT_NORDWR(0x18459, 0x1fff),
1208 	A6XX_PROTECT_NORDWR(0x1a459, 0x1fff),
1209 	A6XX_PROTECT_NORDWR(0x1c459, 0x1fff),
1210 	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
1211 	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
1212 	A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
1213 	A6XX_PROTECT_NORDWR(0x1f878, 0x002a),
1214 	/* CP_PROTECT_REG[44, 46] are left untouched! */
1215 	0,
1216 	0,
1217 	0,
1218 	A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000),
1219 };
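
/*
 * The bare zero entries above keep the array indices aligned so that
 * CP_PROTECT_REG[44..46] stay unprogrammed: a6xx_set_cp_protect() below
 * skips any entry whose value is zero.
 */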
1220 
1221 static void a6xx_set_cp_protect(struct msm_gpu *gpu)
1222 {
1223 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1224 	const u32 *regs = a6xx_protect;
1225 	unsigned i, count, count_max;
1226 
1227 	if (adreno_is_a650(adreno_gpu)) {
1228 		regs = a650_protect;
1229 		count = ARRAY_SIZE(a650_protect);
1230 		count_max = 48;
1231 		BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
1232 	} else if (adreno_is_a690(adreno_gpu)) {
1233 		regs = a690_protect;
1234 		count = ARRAY_SIZE(a690_protect);
1235 		count_max = 48;
1236 		BUILD_BUG_ON(ARRAY_SIZE(a690_protect) > 48);
1237 	} else if (adreno_is_a660_family(adreno_gpu)) {
1238 		regs = a660_protect;
1239 		count = ARRAY_SIZE(a660_protect);
1240 		count_max = 48;
1241 		BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
1242 	} else if (adreno_is_a730(adreno_gpu) || adreno_is_a740(adreno_gpu)) {
1243 		regs = a730_protect;
1244 		count = ARRAY_SIZE(a730_protect);
1245 		count_max = 48;
1246 		BUILD_BUG_ON(ARRAY_SIZE(a730_protect) > 48);
1247 	} else {
1248 		regs = a6xx_protect;
1249 		count = ARRAY_SIZE(a6xx_protect);
1250 		count_max = 32;
1251 		BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
1252 	}
1253 
1254 	/*
1255 	 * Enable access protection to privileged registers, fault on an access
1256 	 * protect violation and select the last span to protect from the start
1257 	 * address all the way to the end of the register address space
1258 	 */
1259 	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL,
1260 		  A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN |
1261 		  A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN |
1262 		  A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE);
1263 
1264 	for (i = 0; i < count - 1; i++) {
1265 		/* Intentionally skip writing to some registers */
1266 		if (regs[i])
1267 			gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
1268 	}
1269 	/* Program the last entry at CP_PROTECT[count_max - 1] so it gets the "infinite" span */
1270 	gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
1271 }
1272 
1273 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
1274 {
1275 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1276 	/* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
1277 	u32 rgb565_predicator = 0;
1278 	/* Unknown, introduced with A650 family */
1279 	u32 uavflagprd_inv = 0;
1280 	/* Whether the minimum access length is 64 bits */
1281 	u32 min_acc_len = 0;
1282 	/* Entirely magic, per-GPU-gen value */
1283 	u32 ubwc_mode = 0;
1284 	/*
1285 	 * The Highest Bank Bit value represents the bit of the highest DDR bank.
1286 	 * We then subtract 13 from it (13 is the minimum value allowed by hw) and
1287 	 * write the lowest two bits of the remaining value as hbb_lo and the
1288 	 * one above it as hbb_hi to the hardware. This should ideally use DRAM
1289 	 * type detection.
1290 	 */
1291 	u32 hbb_hi = 0;
1292 	u32 hbb_lo = 2;
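	/*
	 * Worked example: the defaults hbb_lo = 2, hbb_hi = 0 encode
	 * HBB = 15 (15 - 13 = 2); the a610 case below uses HBB = 14,
	 * i.e. hbb_lo = 1.
	 */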
1293 	/* Unknown, introduced with A640/680 */
1294 	u32 amsbc = 0;
1295 
1296 	if (adreno_is_a610(adreno_gpu)) {
1297 		/* HBB = 14 */
1298 		hbb_lo = 1;
1299 		min_acc_len = 1;
1300 		ubwc_mode = 1;
1301 	}
1302 
1303 	/* a618 uses the hw default values */
1304 	if (adreno_is_a618(adreno_gpu))
1305 		return;
1306 
1307 	if (adreno_is_a619_holi(adreno_gpu))
1308 		hbb_lo = 0;
1309 
1310 	if (adreno_is_a640_family(adreno_gpu))
1311 		amsbc = 1;
1312 
1313 	if (adreno_is_a650(adreno_gpu) ||
1314 	    adreno_is_a660(adreno_gpu) ||
1315 	    adreno_is_a730(adreno_gpu) ||
1316 	    adreno_is_a740_family(adreno_gpu)) {
1317 		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
1318 		hbb_lo = 3;
1319 		amsbc = 1;
1320 		rgb565_predicator = 1;
1321 		uavflagprd_inv = 2;
1322 	}
1323 
1324 	if (adreno_is_a690(adreno_gpu)) {
1325 		hbb_lo = 2;
1326 		amsbc = 1;
1327 		rgb565_predicator = 1;
1328 		uavflagprd_inv = 2;
1329 	}
1330 
1331 	if (adreno_is_7c3(adreno_gpu)) {
1332 		hbb_lo = 1;
1333 		amsbc = 1;
1334 		rgb565_predicator = 1;
1335 		uavflagprd_inv = 2;
1336 	}
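
	/*
	 * Worked example for the a650-family values above: rgb565_predicator
	 * = 1, amsbc = 1 and hbb_lo = 3 yield RB_NC_MODE_CNTL =
	 * (1 << 11) | (1 << 4) | (3 << 1) = 0x816.
	 */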
1337 
1338 	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
1339 		  rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
1340 		  min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
1341 
1342 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
1343 		  min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
1344 
1345 	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
1346 		  uavflagprd_inv << 4 | min_acc_len << 3 |
1347 		  hbb_lo << 1 | ubwc_mode);
1348 
1349 	if (adreno_is_a7xx(adreno_gpu))
1350 		gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
1351 			  FIELD_PREP(GENMASK(8, 5), hbb_lo));
1352 
1353 	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
1354 }
1355 
1356 static int a6xx_cp_init(struct msm_gpu *gpu)
1357 {
1358 	struct msm_ringbuffer *ring = gpu->rb[0];
1359 
1360 	OUT_PKT7(ring, CP_ME_INIT, 8);
1361 
1362 	OUT_RING(ring, 0x0000002f);
1363 
1364 	/* Enable multiple hardware contexts */
1365 	OUT_RING(ring, 0x00000003);
1366 
1367 	/* Enable error detection */
1368 	OUT_RING(ring, 0x20000000);
1369 
1370 	/* Don't enable header dump */
1371 	OUT_RING(ring, 0x00000000);
1372 	OUT_RING(ring, 0x00000000);
1373 
1374 	/* No workarounds enabled */
1375 	OUT_RING(ring, 0x00000000);
1376 
1377 	/* Pad rest of the cmds with 0's */
1378 	OUT_RING(ring, 0x00000000);
1379 	OUT_RING(ring, 0x00000000);
1380 
1381 	a6xx_flush(gpu, ring);
1382 	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
1383 }
1384 
1385 static int a7xx_cp_init(struct msm_gpu *gpu)
1386 {
1387 	struct msm_ringbuffer *ring = gpu->rb[0];
1388 	u32 mask;
1389 
1390 	/* Disable concurrent binning before sending CP init */
1391 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
1392 	OUT_RING(ring, BIT(27));
1393 
1394 	OUT_PKT7(ring, CP_ME_INIT, 7);
1395 
1396 	/* Use multiple HW contexts */
1397 	mask = BIT(0);
1398 
1399 	/* Enable error detection */
1400 	mask |= BIT(1);
1401 
1402 	/* Set default reset state */
1403 	mask |= BIT(3);
1404 
1405 	/* Disable save/restore of performance counters across preemption */
1406 	mask |= BIT(6);
1407 
1408 	/* Enable the register init list with the spinlock */
1409 	mask |= BIT(8);
1410 
1411 	OUT_RING(ring, mask);
1412 
1413 	/* Enable multiple hardware contexts */
1414 	OUT_RING(ring, 0x00000003);
1415 
1416 	/* Enable error detection */
1417 	OUT_RING(ring, 0x20000000);
1418 
1419 	/* Operation mode mask */
1420 	OUT_RING(ring, 0x00000002);
1421 
1422 	/* *Don't* send a power up reg list for concurrent binning (TODO) */
1423 	/* Lo address */
1424 	OUT_RING(ring, 0x00000000);
1425 	/* Hi address */
1426 	OUT_RING(ring, 0x00000000);
1427 	/* BIT(31) set => read the regs from the list */
1428 	OUT_RING(ring, 0x00000000);
1429 
1430 	a6xx_flush(gpu, ring);
1431 	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
1432 }
1433 
1434 /*
1435  * Check that the microcode version is new enough to include several key
1436  * security fixes. Return true if the ucode is safe.
1437  */
1438 static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
1439 		struct drm_gem_object *obj)
1440 {
1441 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1442 	struct msm_gpu *gpu = &adreno_gpu->base;
1443 	const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
1444 	u32 *buf = msm_gem_get_vaddr(obj);
1445 	bool ret = false;
1446 
1447 	if (IS_ERR(buf))
1448 		return false;
1449 
1450 	/* A7xx is safe! Exit via 'out' so the vaddr reference is dropped */
1451 	if (adreno_is_a7xx(adreno_gpu)) {
1452 		ret = true;
 		goto out;
 	}
1453 
1454 	/*
1455 	 * Targets up to a640 (a618, a630 and a640) need to check for a
1456 	 * microcode version that is patched to support the whereami opcode or
1457 	 * one that is new enough to include it by default.
1458 	 *
1459 	 * a650 tier targets don't need whereami but still need to be
1460 	 * equal to or newer than 0.95 for other security fixes
1461 	 *
1462 	 * a660 targets have all the critical security fixes from the start
1463 	 */
1464 	if (!strcmp(sqe_name, "a630_sqe.fw")) {
1465 		/*
1466 		 * If the lowest nibble is 0xa that is an indication that this
1467 		 * microcode has been patched. The actual version is in dword
1468 		 * [3] but we only care about the patchlevel which is the lowest
1469 		 * nibble of dword [3]
1470 		 *
1471 		 * Otherwise check that the firmware is greater than or equal
1472 		 * to 1.90 which was the first version that had this fix built
1473 		 * in
1474 		 */
1475 		if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
1476 			(buf[0] & 0xfff) >= 0x190) {
1477 			a6xx_gpu->has_whereami = true;
1478 			ret = true;
1479 			goto out;
1480 		}
1481 
1482 		DRM_DEV_ERROR(&gpu->pdev->dev,
1483 			"a630 SQE ucode is too old. Have version %x need at least %x\n",
1484 			buf[0] & 0xfff, 0x190);
1485 	} else if (!strcmp(sqe_name, "a650_sqe.fw")) {
1486 		if ((buf[0] & 0xfff) >= 0x095) {
1487 			ret = true;
1488 			goto out;
1489 		}
1490 
1491 		DRM_DEV_ERROR(&gpu->pdev->dev,
1492 			"a650 SQE ucode is too old. Have version %x need at least %x\n",
1493 			buf[0] & 0xfff, 0x095);
1494 	} else if (!strcmp(sqe_name, "a660_sqe.fw")) {
1495 		ret = true;
1496 	} else {
1497 		DRM_DEV_ERROR(&gpu->pdev->dev,
1498 			"unknown GPU, add it to a6xx_ucode_check_version()!!\n");
1499 	}
1500 out:
1501 	msm_gem_put_vaddr(obj);
1502 	return ret;
1503 }
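
/*
 * Illustrative sketch (not part of the driver): the a630 version check
 * above, restated. buf[0] bits [11:0] carry the version; a low nibble of
 * 0xa marks a patched build whose patchlevel lives in the low nibble of
 * buf[2]. The helper name is hypothetical.
 */
static inline bool example_a630_sqe_is_safe(const u32 *buf)
{
	bool patched = ((buf[0] & 0xf) == 0xa) && ((buf[2] & 0xf) >= 1);

	return patched || (buf[0] & 0xfff) >= 0x190;	/* stock >= 1.90 */
}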
1504 
1505 static int a6xx_ucode_load(struct msm_gpu *gpu)
1506 {
1507 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1508 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1509 
1510 	if (!a6xx_gpu->sqe_bo) {
1511 		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
1512 			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
1513 
1514 		if (IS_ERR(a6xx_gpu->sqe_bo)) {
1515 			int ret = PTR_ERR(a6xx_gpu->sqe_bo);
1516 
1517 			a6xx_gpu->sqe_bo = NULL;
1518 			DRM_DEV_ERROR(&gpu->pdev->dev,
1519 				"Could not allocate SQE ucode: %d\n", ret);
1520 
1521 			return ret;
1522 		}
1523 
1524 		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
1525 		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
1526 			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
1527 			drm_gem_object_put(a6xx_gpu->sqe_bo);
1528 
1529 			a6xx_gpu->sqe_bo = NULL;
1530 			return -EPERM;
1531 		}
1532 	}
1533 
1534 	/*
1535 	 * Expanded APRIV and targets that support WHERE_AM_I both need a
1536 	 * privileged buffer to store the RPTR shadow
1537 	 */
1538 	if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
1539 	    !a6xx_gpu->shadow_bo) {
1540 		a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
1541 						      sizeof(u32) * gpu->nr_rings,
1542 						      MSM_BO_WC | MSM_BO_MAP_PRIV,
1543 						      gpu->aspace, &a6xx_gpu->shadow_bo,
1544 						      &a6xx_gpu->shadow_iova);
1545 
1546 		if (IS_ERR(a6xx_gpu->shadow))
1547 			return PTR_ERR(a6xx_gpu->shadow);
1548 
1549 		msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
1550 	}
1551 
1552 	return 0;
1553 }
1554 
1555 static int a6xx_zap_shader_init(struct msm_gpu *gpu)
1556 {
1557 	static bool loaded;
1558 	int ret;
1559 
1560 	if (loaded)
1561 		return 0;
1562 
1563 	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
1564 
1565 	loaded = !ret;
1566 	return ret;
1567 }
1568 
1569 #define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
1570 		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
1571 		       A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
1572 		       A6XX_RBBM_INT_0_MASK_CP_IB2 | \
1573 		       A6XX_RBBM_INT_0_MASK_CP_IB1 | \
1574 		       A6XX_RBBM_INT_0_MASK_CP_RB | \
1575 		       A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
1576 		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
1577 		       A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
1578 		       A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
1579 		       A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
1580 
1581 #define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
1582 		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
1583 		       A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
1584 		       A6XX_RBBM_INT_0_MASK_CP_SW | \
1585 		       A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
1586 		       A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
1587 		       A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
1588 		       A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
1589 		       A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
1590 		       A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
1591 		       A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
1592 		       A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
1593 		       A6XX_RBBM_INT_0_MASK_TSBWRITEERROR)
1594 
1595 #define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
1596 			 A6XX_CP_APRIV_CNTL_RBFETCH | \
1597 			 A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
1598 			 A6XX_CP_APRIV_CNTL_RBRPWB)
1599 
1600 #define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
1601 			   A6XX_CP_APRIV_CNTL_CDREAD | \
1602 			   A6XX_CP_APRIV_CNTL_CDWRITE)
1603 
1604 static int hw_init(struct msm_gpu *gpu)
1605 {
1606 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1607 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1608 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1609 	u64 gmem_range_min;
1610 	int ret;
1611 
1612 	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
1613 		/* Make sure the GMU keeps the GPU on while we set it up */
1614 		ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
1615 		if (ret)
1616 			return ret;
1617 	}
1618 
1619 	/* Clear GBIF halt in case GX domain was not collapsed */
1620 	if (adreno_is_a619_holi(adreno_gpu)) {
1621 		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
1622 		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
1623 		/* Let's make extra sure that the GPU can access the memory.. */
1624 		mb();
1625 	} else if (a6xx_has_gbif(adreno_gpu)) {
1626 		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
1627 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
1628 		/* Let's make extra sure that the GPU can access the memory.. */
1629 		mb();
1630 	}
1631 
1632 	/* Some GPUs are stubborn and take their sweet time to unhalt GBIF! */
1633 	if (adreno_is_a7xx(adreno_gpu) && a6xx_has_gbif(adreno_gpu))
1634 		spin_until(!gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK));
1635 
1636 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
1637 
1638 	if (adreno_is_a619_holi(adreno_gpu))
1639 		a6xx_sptprac_enable(gmu);
1640 
1641 	/*
1642 	 * Disable the trusted memory range - we don't actually support secure
1643 	 * memory rendering at this point in time and we don't want to block off
1644 	 * part of the virtual memory space.
1645 	 */
1646 	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
1647 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
1648 
1649 	if (!adreno_is_a7xx(adreno_gpu)) {
1650 		/* Turn on 64 bit addressing for all blocks */
1651 		gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
1652 		gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
1653 		gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
1654 		gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
1655 		gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
1656 		gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
1657 		gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
1658 		gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
1659 		gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
1660 		gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
1661 		gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
1662 		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
1663 	}
1664 
1665 	/* enable hardware clockgating */
1666 	a6xx_set_hwcg(gpu, true);
1667 
1668 	/* VBIF/GBIF start */
1669 	if (adreno_is_a610(adreno_gpu) ||
1670 	    adreno_is_a640_family(adreno_gpu) ||
1671 	    adreno_is_a650_family(adreno_gpu) ||
1672 	    adreno_is_a7xx(adreno_gpu)) {
1673 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
1674 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
1675 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
1676 		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
1677 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
1678 			  adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
1679 	} else {
1680 		gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
1681 	}
1682 
1683 	if (adreno_is_a630(adreno_gpu))
1684 		gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
1685 
1686 	if (adreno_is_a7xx(adreno_gpu))
1687 		gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0);
1688 
1689 	/* Make all blocks contribute to the GPU BUSY perf counter */
1690 	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
1691 
1692 	/* Disable L2 bypass in the UCHE */
1693 	if (adreno_is_a7xx(adreno_gpu)) {
1694 		gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
1695 		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
1696 	} else {
1697 		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
1698 		gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
1699 		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
1700 	}
1701 
1702 	if (!(adreno_is_a650_family(adreno_gpu) ||
1703 	      adreno_is_a730(adreno_gpu))) {
1704 		gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;
1705 
1706 		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
1707 		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);
1708 
1709 		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
1710 			gmem_range_min + adreno_gpu->info->gmem - 1);
1711 	}
1712 
1713 	if (adreno_is_a7xx(adreno_gpu)) {
1714 		gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
1715 	} else {
1716 		gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
1717 		gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
1718 	}
1719 
1720 	if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
1721 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
1722 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
1723 	} else if (adreno_is_a610(adreno_gpu)) {
1724 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
1725 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
1726 	} else if (!adreno_is_a7xx(adreno_gpu)) {
1727 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
1728 		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
1729 	}
1730 
1731 	if (adreno_is_a660_family(adreno_gpu))
1732 		gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
1733 
1734 	/* Setting the mem pool size */
1735 	if (adreno_is_a610(adreno_gpu)) {
1736 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
1737 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
1738 	} else if (!adreno_is_a7xx(adreno_gpu)) {
1739 		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
 	}
1740 
1741 	/* Set the primFifo threshold default values, and the
1742 	 * vccCacheSkipDis=1 bit (0x200) for A640 and newer
1743 	 */
1744 	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
1745 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
1746 	else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
1747 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
1750 	else if (adreno_is_a619(adreno_gpu))
1751 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
1752 	else if (adreno_is_a610(adreno_gpu))
1753 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
1754 	else if (!adreno_is_a7xx(adreno_gpu))
1755 		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
1756 
1757 	/* Set the AHB default slave response to "ERROR" */
1758 	gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
1759 
1760 	/* Turn on performance counters */
1761 	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
1762 
1763 	if (adreno_is_a7xx(adreno_gpu)) {
1764 		/* Turn on the IFPC counter (countable 4 on XOCLK4) */
1765 		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
1766 			  FIELD_PREP(GENMASK(7, 0), 0x4));
1767 	}
1768 
1769 	/* Select CP0 to always count cycles */
1770 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
1771 
1772 	a6xx_set_ubwc_config(gpu);
1773 
1774 	/* Enable fault detection */
1775 	if (adreno_is_a730(adreno_gpu) ||
1776 	    adreno_is_a740_family(adreno_gpu))
1777 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
1778 	else if (adreno_is_a619(adreno_gpu))
1779 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
1780 	else if (adreno_is_a610(adreno_gpu))
1781 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
1782 	else
1783 		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
1784 
1785 	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
1786 
1787 	/* Set weights for bicubic filtering */
1788 	if (adreno_is_a650_family(adreno_gpu)) {
1789 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
1790 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
1791 			0x3fe05ff4);
1792 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
1793 			0x3fa0ebee);
1794 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
1795 			0x3f5193ed);
1796 		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
1797 			0x3f0243f0);
1798 	}
1799 
1800 	/* Set up the CX GMU counter 0 to count busy ticks */
1801 	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
1802 
1803 	/* Enable the power counter */
1804 	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5));
1805 	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
1806 
1807 	/* Protect registers from the CP */
1808 	a6xx_set_cp_protect(gpu);
1809 
1810 	if (adreno_is_a660_family(adreno_gpu)) {
1811 		gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
1812 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
1813 	}
1814 
1815 	/* Set dualQ + disable afull for A660 GPU */
1816 	if (adreno_is_a660(adreno_gpu))
1817 		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
1818 	else if (adreno_is_a7xx(adreno_gpu))
1819 		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
1820 			  FIELD_PREP(GENMASK(19, 16), 6) |
1821 			  FIELD_PREP(GENMASK(15, 12), 6) |
1822 			  FIELD_PREP(GENMASK(11, 8), 9) |
1823 			  BIT(3) | BIT(2) |
1824 			  FIELD_PREP(GENMASK(1, 0), 2));
1825 
1826 	/* Enable expanded apriv for targets that support it */
1827 	if (gpu->hw_apriv) {
1828 		if (adreno_is_a7xx(adreno_gpu)) {
1829 			gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
1830 				  A7XX_BR_APRIVMASK);
1831 			gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL,
1832 				  A7XX_APRIV_MASK);
1833 			gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL,
1834 				  A7XX_APRIV_MASK);
1835 		} else {
1836 			gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
1837 				  BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
 		}
1838 	}
1839 
1840 	/* Enable interrupts */
1841 	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK,
1842 		  adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK);
1843 
1844 	ret = adreno_hw_init(gpu);
1845 	if (ret)
1846 		goto out;
1847 
1848 	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
1849 
1850 	/* Set the ringbuffer address */
1851 	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
1852 
1853 	/* Targets that support extended APRIV can use the RPTR shadow from
1854 	 * hardware, but all the other ones need to disable the feature. Targets
1855 	 * that support the WHERE_AM_I opcode can use that instead.
1856 	 */
1857 	if (adreno_gpu->base.hw_apriv)
1858 		gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
1859 	else
1860 		gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
1861 			MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
1862 
1863 	/* Configure the RPTR shadow if needed: */
1864 	if (a6xx_gpu->shadow_bo) {
1865 		gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
1866 			shadowptr(a6xx_gpu, gpu->rb[0]));
1867 	}
1868 
1869 	/* ..which means "always" on A7xx, also for BV shadow */
1870 	if (adreno_is_a7xx(adreno_gpu)) {
1871 		gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
1872 			    rbmemptr(gpu->rb[0], bv_fence));
1873 	}
1874 
1875 	/* Always come up on rb 0 */
1876 	a6xx_gpu->cur_ring = gpu->rb[0];
1877 
1878 	gpu->cur_ctx_seqno = 0;
1879 
1880 	/* Enable the SQE to start the CP engine */
1881 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
1882 
1883 	ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
1884 	if (ret)
1885 		goto out;
1886 
1887 	/*
1888 	 * Try to load a zap shader into the secure world. If successful
1889 	 * we can use the CP to switch out of secure mode. If not then we
1890 	 * have no recourse but to try to switch ourselves out manually. If we
1891 	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
1892 	 * be blocked and a permissions violation will soon follow.
1893 	 */
1894 	ret = a6xx_zap_shader_init(gpu);
1895 	if (!ret) {
1896 		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
1897 		OUT_RING(gpu->rb[0], 0x00000000);
1898 
1899 		a6xx_flush(gpu, gpu->rb[0]);
1900 		if (!a6xx_idle(gpu, gpu->rb[0]))
1901 			return -EINVAL;
1902 	} else if (ret == -ENODEV) {
1903 		/*
1904 		 * This device does not use zap shader (but print a warning
1905 		 * just in case someone got their dt wrong.. hopefully they
1906 		 * have a debug UART to realize the error of their ways...
1907 		 * if you mess this up you are about to crash horribly)
1908 		 */
1909 		dev_warn_once(gpu->dev->dev,
1910 			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
1911 		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
1912 		ret = 0;
1913 	} else {
1914 		return ret;
1915 	}
1916 
1917 out:
1918 	if (adreno_has_gmu_wrapper(adreno_gpu))
1919 		return ret;
1920 	/*
1921 	 * Tell the GMU that we are done touching the GPU and it can start power
1922 	 * management
1923 	 */
1924 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
1925 
1926 	if (a6xx_gpu->gmu.legacy) {
1927 		/* Take the GMU out of its special boot mode */
1928 		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
1929 	}
1930 
1931 	return ret;
1932 }
1933 
1934 static int a6xx_hw_init(struct msm_gpu *gpu)
1935 {
1936 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1937 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1938 	int ret;
1939 
1940 	mutex_lock(&a6xx_gpu->gmu.lock);
1941 	ret = hw_init(gpu);
1942 	mutex_unlock(&a6xx_gpu->gmu.lock);
1943 
1944 	return ret;
1945 }
1946 
1947 static void a6xx_dump(struct msm_gpu *gpu)
1948 {
1949 	DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
1950 			gpu_read(gpu, REG_A6XX_RBBM_STATUS));
1951 	adreno_dump(gpu);
1952 }
1953 
1954 static void a6xx_recover(struct msm_gpu *gpu)
1955 {
1956 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1957 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1958 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1959 	int i, active_submits;
1960 
1961 	adreno_dump_info(gpu);
1962 
1963 	for (i = 0; i < 8; i++)
1964 		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
1965 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
1966 
1967 	if (hang_debug)
1968 		a6xx_dump(gpu);
1969 
1970 	/*
1971 	 * Mark the GPU as hung so that recovery-specific sequences can run
1972 	 * during the rpm suspend we are about to trigger
1973 	 */
1974 	a6xx_gpu->hung = true;
1975 
1976 	/* Halt SQE first */
1977 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
1978 
1979 	pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
1980 
1981 	/* active_submits won't change until we make a submission */
1982 	mutex_lock(&gpu->active_lock);
1983 	active_submits = gpu->active_submits;
1984 
1985 	/*
1986 	 * Temporarily clear active_submits count to silence a WARN() in the
1987 	 * runtime suspend cb
1988 	 */
1989 	gpu->active_submits = 0;
1990 
1991 	if (adreno_has_gmu_wrapper(adreno_gpu)) {
1992 		/* Drain the outstanding traffic on memory buses */
1993 		a6xx_bus_clear_pending_transactions(adreno_gpu, true);
1994 
1995 		/* Reset the GPU to a clean state */
1996 		a6xx_gpu_sw_reset(gpu, true);
1997 		a6xx_gpu_sw_reset(gpu, false);
1998 	}
1999 
2000 	reinit_completion(&gmu->pd_gate);
2001 	dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
2002 	dev_pm_genpd_synced_poweroff(gmu->cxpd);
2003 
2004 	/* Drop the rpm refcount from active submits */
2005 	if (active_submits)
2006 		pm_runtime_put(&gpu->pdev->dev);
2007 
2008 	/* And the final one from recover worker */
2009 	pm_runtime_put_sync(&gpu->pdev->dev);
2010 
2011 	if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
2012 		DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
2013 
2014 	dev_pm_genpd_remove_notifier(gmu->cxpd);
2015 
2016 	pm_runtime_use_autosuspend(&gpu->pdev->dev);
2017 
2018 	if (active_submits)
2019 		pm_runtime_get(&gpu->pdev->dev);
2020 
2021 	pm_runtime_get_sync(&gpu->pdev->dev);
2022 
2023 	gpu->active_submits = active_submits;
2024 	mutex_unlock(&gpu->active_lock);
2025 
2026 	msm_gpu_hw_init(gpu);
2027 	a6xx_gpu->hung = false;
2028 }
2029 
2030 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
2031 {
2032 	static const char *uche_clients[7] = {
2033 		"VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
2034 	};
2035 	u32 val;
2036 
2037 	if (mid < 1 || mid > 3)
2038 		return "UNKNOWN";
2039 
2040 	/*
2041 	 * The source of the data depends on the mid ID read from FSYNR1
2042 	 * and the client ID read from the UCHE block
2043 	 */
2044 	val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
2045 
2046 	/* mid = 3 is most precise and refers to only one block per client */
2047 	if (mid == 3)
2048 		return uche_clients[val & 7];
2049 
2050 	/* For mid=2 the source is TP or VFD except when the client id is 0 */
2051 	if (mid == 2)
2052 		return ((val & 7) == 0) ? "TP" : "TP|VFD";
2053 
2054 	/* For mid=1 just return "UCHE" as a catchall for everything else */
2055 	return "UCHE";
2056 }
2057 
2058 static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
2059 {
2060 	if (id == 0)
2061 		return "CP";
2062 	else if (id == 4)
2063 		return "CCU";
2064 	else if (id == 6)
2065 		return "CDP Prefetch";
2066 
2067 	return a6xx_uche_fault_block(gpu, id);
2068 }
2069 
2070 static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
2071 {
2072 	struct msm_gpu *gpu = arg;
2073 	struct adreno_smmu_fault_info *info = data;
2074 	const char *block = "unknown";
2075 
2076 	u32 scratch[] = {
2077 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
2078 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
2079 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
2080 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
2081 	};
2082 
2083 	if (info)
2084 		block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
2085 
2086 	return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
2087 }
2088 
2089 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
2090 {
2091 	u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
2092 
2093 	if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
2094 		u32 val;
2095 
2096 		gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
2097 		val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
2098 		dev_err_ratelimited(&gpu->pdev->dev,
2099 			"CP | opcode error | possible opcode=0x%8.8X\n",
2100 			val);
2101 	}
2102 
2103 	if (status & A6XX_CP_INT_CP_UCODE_ERROR)
2104 		dev_err_ratelimited(&gpu->pdev->dev,
2105 			"CP ucode error interrupt\n");
2106 
2107 	if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
2108 		dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
2109 			gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
2110 
2111 	if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
2112 		u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
2113 
2114 		dev_err_ratelimited(&gpu->pdev->dev,
2115 			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
2116 			val & (1 << 20) ? "READ" : "WRITE",
2117 			(val & 0x3ffff), val);
2118 	}
2119 
2120 	if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu)))
2121 		dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
2122 
2123 	if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
2124 		dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
2125 
2126 	if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
2127 		dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
2129 }
2130 
2131 static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
2132 {
2133 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2134 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2135 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
2136 
2137 	/*
2138 	 * If stalled on SMMU fault, we could trip the GPU's hang detection,
2139 	 * but the fault handler will trigger the devcore dump, and we want
2140 	 * to otherwise resume normally rather than killing the submit, so
2141 	 * just bail.
2142 	 */
2143 	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
2144 		return;
2145 
2146 	/*
2147 	 * Force the GPU to stay on until after we finish
2148 	 * collecting information
2149 	 */
2150 	if (!adreno_has_gmu_wrapper(adreno_gpu))
2151 		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
2152 
2153 	DRM_DEV_ERROR(&gpu->pdev->dev,
2154 		"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
2155 		ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
2156 		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
2157 		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
2158 		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
2159 		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
2160 		gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
2161 		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
2162 		gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
2163 
2164 	/* Turn off the hangcheck timer to keep it from bothering us */
2165 	del_timer(&gpu->hangcheck_timer);
2166 
2167 	kthread_queue_work(gpu->worker, &gpu->recover_work);
2168 }
2169 
2170 static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
2171 {
2172 	struct msm_drm_private *priv = gpu->dev->dev_private;
2173 	u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
2174 
2175 	gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
2176 
2177 	if (priv->disable_err_irq)
2178 		status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
2179 
2180 	if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
2181 		a6xx_fault_detect_irq(gpu);
2182 
2183 	if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
2184 		dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
2185 
2186 	if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
2187 		a6xx_cp_hw_err_irq(gpu);
2188 
2189 	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
2190 		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
2191 
2192 	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
2193 		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
2194 
2195 	if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
2196 		dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
2197 
2198 	if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
2199 		msm_gpu_retire(gpu);
2200 
2201 	return IRQ_HANDLED;
2202 }
2203 
2204 static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
2205 {
2206 	llcc_slice_deactivate(a6xx_gpu->llc_slice);
2207 	llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
2208 }
2209 
2210 static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
2211 {
2212 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
2213 	struct msm_gpu *gpu = &adreno_gpu->base;
2214 	u32 cntl1_regval = 0;
2215 
2216 	if (IS_ERR(a6xx_gpu->llc_mmio))
2217 		return;
2218 
2219 	if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
2220 		u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
2221 
2222 		gpu_scid &= 0x1f;
2223 		cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
2224 			       (gpu_scid << 15) | (gpu_scid << 20);
2225 
2226 		/* On A660, the SCID programming for UCHE traffic is done in
2227 		 * A6XX_GBIF_SCACHE_CNTL0[14:10]
2228 		 */
2229 		if (adreno_is_a660_family(adreno_gpu))
2230 			gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
2231 				(1 << 8), (gpu_scid << 10) | (1 << 8));
2232 	}
2233 
2234 	/*
2235 	 * For targets with an MMU500, activate the slice but don't program the
2236 	 * register.  The XBL will take care of that.
2237 	 */
2238 	if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
2239 		if (!a6xx_gpu->have_mmu500) {
2240 			u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);
2241 
2242 			gpuhtw_scid &= 0x1f;
2243 			cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
2244 		}
2245 	}
2246 
2247 	if (!cntl1_regval)
2248 		return;
2249 
2250 	/*
2251 	 * Program the slice IDs for the various GPU blocks and GPU MMU
2252 	 * pagetables
2253 	 */
2254 	if (!a6xx_gpu->have_mmu500) {
2255 		a6xx_llc_write(a6xx_gpu,
2256 			REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
2257 
2258 		/*
2259 		 * Program cacheability overrides to not allocate cache
2260 		 * lines on a write miss
2261 		 */
2262 		a6xx_llc_rmw(a6xx_gpu,
2263 			REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
2264 		return;
2265 	}
2266 
2267 	gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
2268 }
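
/*
 * Illustrative sketch (not part of the driver): the shift-and-or that
 * builds cntl1_regval above packs the same 5-bit SCID into five adjacent
 * fields; with FIELD_PREP it would read as below. Helper name hypothetical.
 */
static inline u32 example_scid_cntl1(u32 gpu_scid)
{
	return FIELD_PREP(GENMASK(4, 0), gpu_scid) |
	       FIELD_PREP(GENMASK(9, 5), gpu_scid) |
	       FIELD_PREP(GENMASK(14, 10), gpu_scid) |
	       FIELD_PREP(GENMASK(19, 15), gpu_scid) |
	       FIELD_PREP(GENMASK(24, 20), gpu_scid);
}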
2269 
2270 static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
2271 {
2272 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
2273 	struct msm_gpu *gpu = &adreno_gpu->base;
2274 
2275 	if (IS_ERR(a6xx_gpu->llc_mmio))
2276 		return;
2277 
2278 	if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
2279 		u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
2280 
2281 		gpu_scid &= GENMASK(4, 0);
2282 
2283 		gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
2284 			  FIELD_PREP(GENMASK(29, 25), gpu_scid) |
2285 			  FIELD_PREP(GENMASK(24, 20), gpu_scid) |
2286 			  FIELD_PREP(GENMASK(19, 15), gpu_scid) |
2287 			  FIELD_PREP(GENMASK(14, 10), gpu_scid) |
2288 			  FIELD_PREP(GENMASK(9, 5), gpu_scid) |
2289 			  FIELD_PREP(GENMASK(4, 0), gpu_scid));
2290 
2291 		gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
2292 			  FIELD_PREP(GENMASK(14, 10), gpu_scid) |
2293 			  BIT(8));
2294 	}
2295 
2296 	llcc_slice_activate(a6xx_gpu->htw_llc_slice);
2297 }
2298 
2299 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
2300 {
2301 	/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
2302 	if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
2303 		return;
2304 
2305 	llcc_slice_putd(a6xx_gpu->llc_slice);
2306 	llcc_slice_putd(a6xx_gpu->htw_llc_slice);
2307 }
2308 
2309 static void a6xx_llc_slices_init(struct platform_device *pdev,
2310 		struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
2311 {
2312 	struct device_node *phandle;
2313 
2314 	/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
2315 	if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
2316 		return;
2317 
2318 	/*
2319 	 * There is a different programming path for A6xx targets with an
2320 	 * mmu500 attached, so detect if that is the case
2321 	 */
2322 	phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
2323 	a6xx_gpu->have_mmu500 = (phandle &&
2324 		of_device_is_compatible(phandle, "arm,mmu-500"));
2325 	of_node_put(phandle);
2326 
2327 	if (is_a7xx || !a6xx_gpu->have_mmu500)
2328 		a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
2329 	else
2330 		a6xx_gpu->llc_mmio = NULL;
2331 
2332 	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
2333 	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
2334 
2335 	if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
2336 		a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
2337 }
2338 
2339 #define GBIF_CLIENT_HALT_MASK		BIT(0)
2340 #define GBIF_ARB_HALT_MASK		BIT(1)
2341 #define VBIF_XIN_HALT_CTRL0_MASK	GENMASK(3, 0)
2342 #define VBIF_RESET_ACK_MASK		0xF0
2343 #define GPR0_GBIF_HALT_REQUEST		0x1E0
2344 
2345 void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
2346 {
2347 	struct msm_gpu *gpu = &adreno_gpu->base;
2348 
2349 	if (adreno_is_a619_holi(adreno_gpu)) {
2350 		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
2351 		spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
2352 				(VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
2353 	} else if (!a6xx_has_gbif(adreno_gpu)) {
2354 		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
2355 		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
2356 				(VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
2357 		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
2358 
2359 		return;
2360 	}
2361 
2362 	if (gx_off) {
2363 		/* Halt the gx side of GBIF */
2364 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
2365 		spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
2366 	}
2367 
2368 	/* Halt new client requests on GBIF */
2369 	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
2370 	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
2371 			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
2372 
2373 	/* Halt all AXI requests on GBIF */
2374 	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
2375 	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
2376 			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
2377 
2378 	/* The GBIF halt needs to be explicitly cleared */
2379 	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
2380 }
2381 
2382 void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
2383 {
2384 	/* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
2385 	if (adreno_is_a610(to_adreno_gpu(gpu)))
2386 		return;
2387 
2388 	gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
2389 	/* Perform a bogus read and add a brief delay to ensure ordering. */
2390 	gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);
2391 	udelay(1);
2392 
2393 	/* The reset line needs to be asserted for at least 100 us */
2394 	if (assert)
2395 		udelay(100);
2396 }
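
/*
 * Illustrative usage (not part of the driver): a full reset cycle with the
 * helper above - assert, which internally holds the line for the required
 * 100 us, then deassert - mirroring what a6xx_recover() does on GMU-wrapper
 * targets. The function name is hypothetical.
 */
static void example_reset_pulse(struct msm_gpu *gpu)
{
	a6xx_gpu_sw_reset(gpu, true);
	a6xx_gpu_sw_reset(gpu, false);
}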
2397 
2398 static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
2399 {
2400 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2401 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2402 	int ret;
2403 
2404 	gpu->needs_hw_init = true;
2405 
2406 	trace_msm_gpu_resume(0);
2407 
2408 	mutex_lock(&a6xx_gpu->gmu.lock);
2409 	ret = a6xx_gmu_resume(a6xx_gpu);
2410 	mutex_unlock(&a6xx_gpu->gmu.lock);
2411 	if (ret)
2412 		return ret;
2413 
2414 	msm_devfreq_resume(gpu);
2415 
2416 	adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
2417 
2418 	return ret;
2419 }
2420 
2421 static int a6xx_pm_resume(struct msm_gpu *gpu)
2422 {
2423 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2424 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2425 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
2426 	unsigned long freq = gpu->fast_rate;
2427 	struct dev_pm_opp *opp;
2428 	int ret;
2429 
2430 	gpu->needs_hw_init = true;
2431 
2432 	trace_msm_gpu_resume(0);
2433 
2434 	mutex_lock(&a6xx_gpu->gmu.lock);
2435 
2436 	opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
2437 	if (IS_ERR(opp)) {
2438 		ret = PTR_ERR(opp);
2439 		goto err_set_opp;
2440 	}
2441 	dev_pm_opp_put(opp);
2442 
2443 	/* Set the core clock and bus bw, having VDD scaling in mind */
2444 	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
2445 
2446 	pm_runtime_resume_and_get(gmu->dev);
2447 	pm_runtime_resume_and_get(gmu->gxpd);
2448 
2449 	ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
2450 	if (ret)
2451 		goto err_bulk_clk;
2452 
2453 	if (adreno_is_a619_holi(adreno_gpu))
2454 		a6xx_sptprac_enable(gmu);
2455 
2456 	/* If anything goes south, tear the GPU down piece by piece.. */
2457 	if (ret) {
2458 err_bulk_clk:
2459 		pm_runtime_put(gmu->gxpd);
2460 		pm_runtime_put(gmu->dev);
2461 		dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
2462 	}
2463 err_set_opp:
2464 	mutex_unlock(&a6xx_gpu->gmu.lock);
2465 
2466 	if (!ret)
2467 		msm_devfreq_resume(gpu);
2468 
2469 	return ret;
2470 }
2471 
2472 static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
2473 {
2474 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2475 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2476 	int i, ret;
2477 
2478 	trace_msm_gpu_suspend(0);
2479 
2480 	a6xx_llc_deactivate(a6xx_gpu);
2481 
2482 	msm_devfreq_suspend(gpu);
2483 
2484 	mutex_lock(&a6xx_gpu->gmu.lock);
2485 	ret = a6xx_gmu_stop(a6xx_gpu);
2486 	mutex_unlock(&a6xx_gpu->gmu.lock);
2487 	if (ret)
2488 		return ret;
2489 
2490 	if (a6xx_gpu->shadow_bo)
2491 		for (i = 0; i < gpu->nr_rings; i++)
2492 			a6xx_gpu->shadow[i] = 0;
2493 
2494 	gpu->suspend_count++;
2495 
2496 	return 0;
2497 }
2498 
2499 static int a6xx_pm_suspend(struct msm_gpu *gpu)
2500 {
2501 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2502 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2503 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
2504 	int i;
2505 
2506 	trace_msm_gpu_suspend(0);
2507 
2508 	msm_devfreq_suspend(gpu);
2509 
2510 	mutex_lock(&a6xx_gpu->gmu.lock);
2511 
2512 	/* Drain the outstanding traffic on memory buses */
2513 	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
2514 
2515 	if (adreno_is_a619_holi(adreno_gpu))
2516 		a6xx_sptprac_disable(gmu);
2517 
2518 	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
2519 
2520 	pm_runtime_put_sync(gmu->gxpd);
2521 	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
2522 	pm_runtime_put_sync(gmu->dev);
2523 
2524 	mutex_unlock(&a6xx_gpu->gmu.lock);
2525 
2526 	if (a6xx_gpu->shadow_bo)
2527 		for (i = 0; i < gpu->nr_rings; i++)
2528 			a6xx_gpu->shadow[i] = 0;
2529 
2530 	gpu->suspend_count++;
2531 
2532 	return 0;
2533 }
2534 
2535 static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
2536 {
2537 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2538 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2539 
2540 	mutex_lock(&a6xx_gpu->gmu.lock);
2541 
2542 	/* Force the GPU power on so we can read this register */
2543 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
2544 
2545 	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
2546 
2547 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
2548 
2549 	mutex_unlock(&a6xx_gpu->gmu.lock);
2550 
2551 	return 0;
2552 }
2553 
2554 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
2555 {
2556 	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
2557 	return 0;
2558 }
2559 
2560 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
2561 {
2562 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2563 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2564 
2565 	return a6xx_gpu->cur_ring;
2566 }
2567 
2568 static void a6xx_destroy(struct msm_gpu *gpu)
2569 {
2570 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2571 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2572 
2573 	if (a6xx_gpu->sqe_bo) {
2574 		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
2575 		drm_gem_object_put(a6xx_gpu->sqe_bo);
2576 	}
2577 
2578 	if (a6xx_gpu->shadow_bo) {
2579 		msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
2580 		drm_gem_object_put(a6xx_gpu->shadow_bo);
2581 	}
2582 
2583 	a6xx_llc_slices_destroy(a6xx_gpu);
2584 
2585 	a6xx_gmu_remove(a6xx_gpu);
2586 
2587 	adreno_gpu_cleanup(adreno_gpu);
2588 
2589 	kfree(a6xx_gpu);
2590 }
2591 
2592 static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
2593 {
2594 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2595 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2596 	u64 busy_cycles;
2597 
2598 	/* 19.2MHz */
2599 	*out_sample_rate = 19200000;
2600 
2601 	busy_cycles = gmu_read64(&a6xx_gpu->gmu,
2602 			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
2603 			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
2604 
2605 	return busy_cycles;
2606 }
2607 
2608 static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
2609 			      bool suspended)
2610 {
2611 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2612 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2613 
2614 	mutex_lock(&a6xx_gpu->gmu.lock);
2615 	a6xx_gmu_set_freq(gpu, opp, suspended);
2616 	mutex_unlock(&a6xx_gpu->gmu.lock);
2617 }
2618 
2619 static struct msm_gem_address_space *
2620 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
2621 {
2622 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2623 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2624 	unsigned long quirks = 0;
2625 
2626 	/*
2627 	 * This allows GPU to set the bus attributes required to use system
2628 	 * cache on behalf of the iommu page table walker.
2629 	 */
2630 	if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
2631 	    !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
2632 		quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
2633 
2634 	return adreno_iommu_create_address_space(gpu, pdev, quirks);
2635 }
2636 
2637 static struct msm_gem_address_space *
2638 a6xx_create_private_address_space(struct msm_gpu *gpu)
2639 {
2640 	struct msm_mmu *mmu;
2641 
2642 	mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
2643 
2644 	if (IS_ERR(mmu))
2645 		return ERR_CAST(mmu);
2646 
2647 	return msm_gem_address_space_create(mmu,
2648 		"gpu", 0x100000000ULL,
2649 		adreno_private_address_space_size(gpu));
2650 }
2651 
2652 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
2653 {
2654 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2655 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2656 
2657 	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
2658 		return a6xx_gpu->shadow[ring->id];
2659 
2660 	return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
2661 }
2662 
2663 static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
2664 {
2665 	struct msm_cp_state cp_state = {
2666 		.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
2667 		.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
2668 		.ib1_rem  = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
2669 		.ib2_rem  = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
2670 	};
2671 	bool progress;
2672 
2673 	/*
2674 	 * Adjust the remaining data to account for what has already been
2675 	 * fetched from memory, but not yet consumed by the SQE.
2676 	 *
2677 	 * This is not *technically* correct, the amount buffered could
2678 	 * exceed the IB size due to hw prefetching ahead, but:
2679 	 *
2680 	 * (1) We aren't trying to find the exact position, just whether
2681 	 *     progress has been made
2682 	 * (2) The CP_REG_TO_MEM at the end of a submit should be enough
2683 	 *     to prevent prefetching into an unrelated submit.  (And
2684 	 *     either way, at some point the ROQ will be full.)
2685 	 */
2686 	cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16;
2687 	cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16;
2688 
2689 	progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
2690 
2691 	ring->last_cp_state = cp_state;
2692 
2693 	return progress;
2694 }
2695 
2696 static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse)
2697 {
2698 	if (!info->speedbins)
2699 		return UINT_MAX;
2700 
2701 	for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++)
2702 		if (info->speedbins[i].fuse == fuse)
2703 			return BIT(info->speedbins[i].speedbin);
2704 
2705 	return UINT_MAX;
2706 }
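
/*
 * Illustrative sketch (not part of the driver): a speedbin table of the
 * shape fuse_to_supp_hw() walks, assuming struct adreno_speedbin from
 * adreno_gpu.h. The fuse values are made up; SHRT_MAX terminates the list.
 */
static const struct adreno_speedbin example_speedbins[] = {
	{ /* fuse */ 0,   /* speedbin */ 0 },	/* -> supp_hw = BIT(0) */
	{ /* fuse */ 169, /* speedbin */ 1 },	/* -> supp_hw = BIT(1) */
	{ SHRT_MAX, 0 },			/* terminator */
};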
2707 
2708 static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info)
2709 {
2710 	u32 supp_hw;
2711 	u32 speedbin;
2712 	int ret;
2713 
2714 	ret = adreno_read_speedbin(dev, &speedbin);
2715 	/*
2716 	 * -ENOENT means that the platform doesn't support speedbin which is
2717 	 * fine
2718 	 */
2719 	if (ret == -ENOENT) {
2720 		return 0;
2721 	} else if (ret) {
2722 		dev_err_probe(dev, ret,
2723 			      "failed to read speed-bin. Some OPPs may not be supported by hardware\n");
2724 		return ret;
2725 	}
2726 
2727 	supp_hw = fuse_to_supp_hw(info, speedbin);
2728 
2729 	if (supp_hw == UINT_MAX) {
2730 		DRM_DEV_ERROR(dev,
2731 			"missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
2732 			speedbin);
2733 		supp_hw = BIT(0); /* Default */
2734 	}
2735 
2736 	ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
2737 	if (ret)
2738 		return ret;
2739 
2740 	return 0;
2741 }
2742 
2743 static const struct adreno_gpu_funcs funcs = {
2744 	.base = {
2745 		.get_param = adreno_get_param,
2746 		.set_param = adreno_set_param,
2747 		.hw_init = a6xx_hw_init,
2748 		.ucode_load = a6xx_ucode_load,
2749 		.pm_suspend = a6xx_gmu_pm_suspend,
2750 		.pm_resume = a6xx_gmu_pm_resume,
2751 		.recover = a6xx_recover,
2752 		.submit = a6xx_submit,
2753 		.active_ring = a6xx_active_ring,
2754 		.irq = a6xx_irq,
2755 		.destroy = a6xx_destroy,
2756 #if defined(CONFIG_DRM_MSM_GPU_STATE)
2757 		.show = a6xx_show,
2758 #endif
2759 		.gpu_busy = a6xx_gpu_busy,
2760 		.gpu_get_freq = a6xx_gmu_get_freq,
2761 		.gpu_set_freq = a6xx_gpu_set_freq,
2762 #if defined(CONFIG_DRM_MSM_GPU_STATE)
2763 		.gpu_state_get = a6xx_gpu_state_get,
2764 		.gpu_state_put = a6xx_gpu_state_put,
2765 #endif
2766 		.create_address_space = a6xx_create_address_space,
2767 		.create_private_address_space = a6xx_create_private_address_space,
2768 		.get_rptr = a6xx_get_rptr,
2769 		.progress = a6xx_progress,
2770 	},
2771 	.get_timestamp = a6xx_gmu_get_timestamp,
2772 };
2773 
2774 static const struct adreno_gpu_funcs funcs_gmuwrapper = {
2775 	.base = {
2776 		.get_param = adreno_get_param,
2777 		.set_param = adreno_set_param,
2778 		.hw_init = a6xx_hw_init,
2779 		.ucode_load = a6xx_ucode_load,
2780 		.pm_suspend = a6xx_pm_suspend,
2781 		.pm_resume = a6xx_pm_resume,
2782 		.recover = a6xx_recover,
2783 		.submit = a6xx_submit,
2784 		.active_ring = a6xx_active_ring,
2785 		.irq = a6xx_irq,
2786 		.destroy = a6xx_destroy,
2787 #if defined(CONFIG_DRM_MSM_GPU_STATE)
2788 		.show = a6xx_show,
2789 #endif
2790 		.gpu_busy = a6xx_gpu_busy,
2791 #if defined(CONFIG_DRM_MSM_GPU_STATE)
2792 		.gpu_state_get = a6xx_gpu_state_get,
2793 		.gpu_state_put = a6xx_gpu_state_put,
2794 #endif
2795 		.create_address_space = a6xx_create_address_space,
2796 		.create_private_address_space = a6xx_create_private_address_space,
2797 		.get_rptr = a6xx_get_rptr,
2798 		.progress = a6xx_progress,
2799 	},
2800 	.get_timestamp = a6xx_get_timestamp,
2801 };
2802 
2803 static const struct adreno_gpu_funcs funcs_a7xx = {
2804 	.base = {
2805 		.get_param = adreno_get_param,
2806 		.set_param = adreno_set_param,
2807 		.hw_init = a6xx_hw_init,
2808 		.ucode_load = a6xx_ucode_load,
2809 		.pm_suspend = a6xx_gmu_pm_suspend,
2810 		.pm_resume = a6xx_gmu_pm_resume,
2811 		.recover = a6xx_recover,
2812 		.submit = a7xx_submit,
2813 		.active_ring = a6xx_active_ring,
2814 		.irq = a6xx_irq,
2815 		.destroy = a6xx_destroy,
2816 #if defined(CONFIG_DRM_MSM_GPU_STATE)
2817 		.show = a6xx_show,
2818 #endif
2819 		.gpu_busy = a6xx_gpu_busy,
2820 		.gpu_get_freq = a6xx_gmu_get_freq,
2821 		.gpu_set_freq = a6xx_gpu_set_freq,
2822 #if defined(CONFIG_DRM_MSM_GPU_STATE)
2823 		.gpu_state_get = a6xx_gpu_state_get,
2824 		.gpu_state_put = a6xx_gpu_state_put,
2825 #endif
2826 		.create_address_space = a6xx_create_address_space,
2827 		.create_private_address_space = a6xx_create_private_address_space,
2828 		.get_rptr = a6xx_get_rptr,
2829 		.progress = a6xx_progress,
2830 	},
2831 	.get_timestamp = a6xx_gmu_get_timestamp,
2832 };
2833 
2834 struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
2835 {
2836 	struct msm_drm_private *priv = dev->dev_private;
2837 	struct platform_device *pdev = priv->gpu_pdev;
2838 	struct adreno_platform_config *config = pdev->dev.platform_data;
2839 	struct device_node *node;
2840 	struct a6xx_gpu *a6xx_gpu;
2841 	struct adreno_gpu *adreno_gpu;
2842 	struct msm_gpu *gpu;
2843 	bool is_a7xx;
2844 	int ret;
2845 
2846 	a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
2847 	if (!a6xx_gpu)
2848 		return ERR_PTR(-ENOMEM);
2849 
2850 	adreno_gpu = &a6xx_gpu->base;
2851 	gpu = &adreno_gpu->base;
2852 
2853 	mutex_init(&a6xx_gpu->gmu.lock);
2854 
2855 	adreno_gpu->registers = NULL;
2856 
2857 	/* Check if there is a GMU phandle and set it up */
2858 	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
2859 	/* FIXME: How do we gracefully handle this? */
2860 	BUG_ON(!node);
2861 
2862 	adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
2863 
2864 	adreno_gpu->base.hw_apriv =
2865 		!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
2866 
2867 	/* gpu->info only gets assigned in adreno_gpu_init() */
2868 	is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
2869 		  config->info->family == ADRENO_7XX_GEN2;
2870 
2871 	a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
2872 
2873 	ret = a6xx_set_supported_hw(&pdev->dev, config->info);
2874 	if (ret) {
2875 		a6xx_destroy(&(a6xx_gpu->base.base));
2876 		return ERR_PTR(ret);
2877 	}
2878 
2879 	if (is_a7xx)
2880 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
2881 	else if (adreno_has_gmu_wrapper(adreno_gpu))
2882 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
2883 	else
2884 		ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
2885 	if (ret) {
2886 		a6xx_destroy(&(a6xx_gpu->base.base));
2887 		return ERR_PTR(ret);
2888 	}
2889 
2890 	/*
2891 	 * For now only clamp to idle freq for devices where this is known not
2892 	 * to cause power supply issues:
2893 	 */
2894 	if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
2895 		priv->gpu_clamp_to_idle = true;
2896 
2897 	if (adreno_has_gmu_wrapper(adreno_gpu))
2898 		ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
2899 	else
2900 		ret = a6xx_gmu_init(a6xx_gpu, node);
2901 	of_node_put(node);
2902 	if (ret) {
2903 		a6xx_destroy(&(a6xx_gpu->base.base));
2904 		return ERR_PTR(ret);
2905 	}
2906 
2907 	if (gpu->aspace)
2908 		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
2909 				a6xx_fault_handler);
2910 
2911 	return gpu;
2912 }
2913