1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
3
4 #include "a2xx_gpu.h"
5 #include "msm_gem.h"
6 #include "msm_mmu.h"
7
8 extern bool hang_debug;
9
10 static void a2xx_dump(struct msm_gpu *gpu);
11 static bool a2xx_idle(struct msm_gpu *gpu);
12
/*
 * Write a userspace submit into the ringbuffer as a stream of CP packets:
 * one INDIRECT_BUFFER per cmd, then a scratch-reg seqno write, a WFI, a
 * cache-flush timestamp event that lands the fence in ring memptrs, and a
 * CP interrupt to kick retirement.  Packet order here is consumed by the
 * CP microcode and must not be rearranged.
 */
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			/* a2xx is 32-bit only, hence lower_32_bits() of the iova */
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	/* stash seqno in a scratch reg for hang diagnosis (see a2xx_recover()) */
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	/* wait for idle before cache flush/interrupt */
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);

	/* write the fence value to ring memptrs once caches are flushed */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}
53
/*
 * Issue CP_ME_INIT to initialize the micro engine after ucode load, then
 * (unless running legacy firmware) enable protected mode.  The 18 dwords
 * below are positional and interpreted by the PM4 microcode; their order
 * must not change.  Returns true once the GPU has drained and gone idle.
 */
static bool a2xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT3(ring, CP_ME_INIT, 18);

	/* All fields present (bits 9:0) */
	OUT_RING(ring, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	OUT_RING(ring, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	OUT_RING(ring, 0x00000000);

	/* register base offsets, relative to the 0x2000 context-reg window: */
	OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
	OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
	OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
	OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
	OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	if (adreno_is_a225(adreno_gpu))
		OUT_RING(ring, 0x80000300);
	else
		OUT_RING(ring, 0x80000180);
	/* Maximum Contexts */
	OUT_RING(ring, 0x00000001);
	/* Write Confirm Interval and The CP will wait the
	 * wait_interval * 16 clocks between polling */
	OUT_RING(ring, 0x00000000);
	/* NQ and External Memory Swap */
	OUT_RING(ring, 0x00000000);
	/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
	if (a2xx_gpu->protection_disabled)
		OUT_RING(ring, 0x00000000);
	else
		OUT_RING(ring, 0x200001f2);
	/* Disable header dumping and Header dump address */
	OUT_RING(ring, 0x00000000);
	/* Header dump size */
	OUT_RING(ring, 0x00000000);

	if (!a2xx_gpu->protection_disabled) {
		/* enable protected mode */
		OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
		OUT_RING(ring, 1);
	}

	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
	return a2xx_idle(gpu);
}
110
/*
 * Full hardware bring-up: soft reset, MH/MMU programming, interrupt
 * unmasking, GMEM sizing, ringbuffer setup, PM4/PFP ucode upload, and
 * finally CP_ME_INIT via a2xx_me_init().  The register write sequence
 * below is order-sensitive — do not reorder.  Returns 0 on success or a
 * negative errno.
 */
static int a2xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
	dma_addr_t pt_base, tran_error;
	uint32_t *ptr, len;
	int i, ret;

	/* fetch pagetable base and translation-error addresses from the gpummu */
	a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error);

	DBG("%s", gpu->name);

	/* halt ME to avoid ucode upload issues on a20x */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);

	/* force clocks on while we reset and program the core */
	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);

	/* note: kgsl uses 0x00000001 after first reset on a22x */
	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
	msleep(30);
	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);

	if (adreno_is_a225(adreno_gpu))
		gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);

	/* note: kgsl uses 0x0000ffff for a20x */
	gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);

	/* MPU: physical range */
	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);

	/* enable the MMU with all clients using translation-range behavior */
	gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
		A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
		A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));

	/* same as parameters in adreno_gpu */
	gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
		A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));

	gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
	gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);

	/* flush any stale TLB/TC entries left over from before the reset */
	gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);

	gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
		A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
		A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
		A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
		A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
		A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
		A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
		A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
		A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
	if (!adreno_is_a20x(adreno_gpu))
		gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);

	gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
	gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);

	/* hand clock control back to the power-management hardware */
	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */

	/* note: gsl doesn't set this */
	gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);

	/* unmask the RBBM/CP/MH error and completion interrupts we handle */
	gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
		A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
	gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
		AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
		AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
		AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
		AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
		AXXX_CP_INT_CNTL_IB_ERROR_MASK |
		AXXX_CP_INT_CNTL_IB1_INT_MASK |
		AXXX_CP_INT_CNTL_RB_INT_MASK);
	gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
	gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
		A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
		A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
		A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);

	/* encode GMEM size (128K/256K/512K) into the EDRAM_INFO field */
	for (i = 3; i <= 5; i++)
		if ((SZ_16K << i) == adreno_gpu->info->gmem)
			break;
	gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));

	/* NOTE: PM4/micro-engine firmware registers look to be the same
	 * for a2xx and a3xx.. we could possibly push that part down to
	 * adreno_gpu base class. Or push both PM4 and PFP but
	 * parameterize the pfp ucode addr/data registers..
	 */

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
	DBG("loading PM4 ucode version: %x", ptr[1]);

	/*
	 * New firmware files seem to have GPU and firmware version in this
	 * word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225).
	 * Older firmware files, which lack protection support, have 0 instead.
	 */
	if (ptr[1] == 0 && !a2xx_gpu->protection_disabled) {
		dev_warn(gpu->dev->dev,
			"Legacy firmware detected, disabling protection support\n");
		a2xx_gpu->protection_disabled = true;
	}

	gpu_write(gpu, REG_AXXX_CP_DEBUG,
		AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
	/* word 0 is a header, so upload starts at index 1 */
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
	DBG("loading PFP ucode version: %x", ptr[5]);

	gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);

	gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);

	return a2xx_me_init(gpu) ? 0 : -EINVAL;
}
268
a2xx_recover(struct msm_gpu * gpu)269 static void a2xx_recover(struct msm_gpu *gpu)
270 {
271 int i;
272
273 adreno_dump_info(gpu);
274
275 for (i = 0; i < 8; i++) {
276 printk("CP_SCRATCH_REG%d: %u\n", i,
277 gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
278 }
279
280 /* dump registers before resetting gpu, if enabled: */
281 if (hang_debug)
282 a2xx_dump(gpu);
283
284 gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
285 gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
286 gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
287 adreno_recover(gpu);
288 }
289
a2xx_destroy(struct msm_gpu * gpu)290 static void a2xx_destroy(struct msm_gpu *gpu)
291 {
292 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
293 struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
294
295 DBG("%s", gpu->name);
296
297 adreno_gpu_cleanup(adreno_gpu);
298
299 kfree(a2xx_gpu);
300 }
301
a2xx_idle(struct msm_gpu * gpu)302 static bool a2xx_idle(struct msm_gpu *gpu)
303 {
304 /* wait for ringbuffer to drain: */
305 if (!adreno_idle(gpu, gpu->rb[0]))
306 return false;
307
308 /* then wait for GPU to finish: */
309 if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
310 A2XX_RBBM_STATUS_GUI_ACTIVE))) {
311 DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
312
313 /* TODO maybe we need to reset GPU here to recover from hang? */
314 return false;
315 }
316
317 return true;
318 }
319
a2xx_irq(struct msm_gpu * gpu)320 static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
321 {
322 uint32_t mstatus, status;
323
324 mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
325
326 if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
327 status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
328
329 dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
330 dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
331 gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
332
333 gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
334 }
335
336 if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
337 status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
338
339 /* only RB_INT is expected */
340 if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
341 dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
342
343 gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
344 }
345
346 if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
347 status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
348
349 dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
350
351 gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
352 }
353
354 msm_gpu_retire(gpu);
355
356 return IRQ_HANDLED;
357 }
358
/*
 * A200 register ranges for debugfs/coredump dumps: pairs of inclusive
 * (first, last) register offsets, terminated by a ~0 sentinel.
 */
static const unsigned int a200_registers[] = {
	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
	0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
	0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
	0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
	0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
	0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
	0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
	0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
	0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
	0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
	0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
	0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
	0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
	0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
	0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
	0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
	0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
	0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
	0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
	0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
	0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
	0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
	0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
	0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
	0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
	~0   /* sentinel */
};
387
/*
 * A220 register ranges for debugfs/coredump dumps: pairs of inclusive
 * (first, last) register offsets, terminated by a ~0 sentinel.
 */
static const unsigned int a220_registers[] = {
	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
	0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
	0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
	0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
	0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
	0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
	0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
	0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
	0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
	0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
	0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
	0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
	0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
	0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
	0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
	0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
	0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
	0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
	0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
	0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
	0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
	0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
	0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
	0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
	0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
	0x4900, 0x4900, 0x4908, 0x4908,
	~0   /* sentinel */
};
417
/*
 * A225 register ranges for debugfs/coredump dumps: pairs of inclusive
 * (first, last) register offsets, terminated by a ~0 sentinel.
 */
static const unsigned int a225_registers[] = {
	0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
	0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
	0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
	0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
	0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
	0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
	0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
	0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
	0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
	0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
	0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
	0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
	0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
	0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
	0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
	0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
	0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
	0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
	0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
	0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
	0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
	0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
	0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
	0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
	0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
	0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
	0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
	0x4908, 0x4908,
	~0   /* sentinel */
};
449
450 /* would be nice to not have to duplicate the _show() stuff with printk(): */
a2xx_dump(struct msm_gpu * gpu)451 static void a2xx_dump(struct msm_gpu *gpu)
452 {
453 printk("status: %08x\n",
454 gpu_read(gpu, REG_A2XX_RBBM_STATUS));
455 adreno_dump(gpu);
456 }
457
a2xx_gpu_state_get(struct msm_gpu * gpu)458 static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
459 {
460 struct msm_gpu_state *state = kzalloc_obj(*state);
461
462 if (!state)
463 return ERR_PTR(-ENOMEM);
464
465 adreno_gpu_state_get(gpu, state);
466
467 state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
468
469 return state;
470 }
471
472 static struct drm_gpuvm *
a2xx_create_vm(struct msm_gpu * gpu,struct platform_device * pdev)473 a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
474 {
475 struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
476 struct drm_gpuvm *vm;
477
478 vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
479
480 if (IS_ERR(vm) && !IS_ERR(mmu))
481 mmu->funcs->destroy(mmu);
482
483 return vm;
484 }
485
a2xx_get_rptr(struct msm_gpu * gpu,struct msm_ringbuffer * ring)486 static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
487 {
488 ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
489 return ring->memptrs->rptr;
490 }
491
492 static const struct msm_gpu_perfcntr perfcntrs[] = {
493 /* TODO */
494 };
495
a2xx_gpu_init(struct drm_device * dev)496 static struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
497 {
498 struct a2xx_gpu *a2xx_gpu = NULL;
499 struct adreno_gpu *adreno_gpu;
500 struct msm_gpu *gpu;
501 struct msm_drm_private *priv = dev->dev_private;
502 struct platform_device *pdev = priv->gpu_pdev;
503 struct adreno_platform_config *config = pdev->dev.platform_data;
504 int ret;
505
506 if (!pdev) {
507 dev_err(dev->dev, "no a2xx device\n");
508 ret = -ENXIO;
509 goto fail;
510 }
511
512 a2xx_gpu = kzalloc_obj(*a2xx_gpu);
513 if (!a2xx_gpu) {
514 ret = -ENOMEM;
515 goto fail;
516 }
517
518 adreno_gpu = &a2xx_gpu->base;
519 gpu = &adreno_gpu->base;
520
521 gpu->perfcntrs = perfcntrs;
522 gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
523
524 ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
525 if (ret)
526 goto fail;
527
528 if (adreno_is_a20x(adreno_gpu))
529 adreno_gpu->registers = a200_registers;
530 else if (adreno_is_a225(adreno_gpu))
531 adreno_gpu->registers = a225_registers;
532 else
533 adreno_gpu->registers = a220_registers;
534
535 return gpu;
536
537 fail:
538 if (a2xx_gpu)
539 a2xx_destroy(&a2xx_gpu->base.base);
540
541 return ERR_PTR(ret);
542 }
543
/*
 * a2xx function table: a2xx-specific hooks plus common adreno/msm_gpu
 * helpers for everything the a2xx does not need to override.
 */
const struct adreno_gpu_funcs a2xx_gpu_funcs = {
	.base = {
		.get_param = adreno_get_param,
		.set_param = adreno_set_param,
		.hw_init = a2xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = a2xx_recover,
		.submit = a2xx_submit,
		.active_ring = adreno_active_ring,
		.irq = a2xx_irq,
		.destroy = a2xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
		.show = adreno_show,
#endif
		.gpu_state_get = a2xx_gpu_state_get,
		.gpu_state_put = adreno_gpu_state_put,
		.create_vm = a2xx_create_vm,
		.get_rptr = a2xx_get_rptr,
	},
	.init = a2xx_gpu_init,
};
566