/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "cik_structs.h"
#include "atom.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */

#define GFX7_NUM_GFX_RINGS     1
#define GFX7_MEC_HPD_SIZE      2048

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");

MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");

MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");

MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
MODULE_FIRMWARE("amdgpu/kabini_me.bin");
MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
MODULE_FIRMWARE("amdgpu/kabini_mec.bin");

MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
MODULE_FIRMWARE("amdgpu/mullins_me.bin");
MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
MODULE_FIRMWARE("amdgpu/mullins_mec.bin");

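/*
 * Per-VMID GDS register offsets: for each of the 16 VMIDs, the base and
 * size of its global data share window plus its GWS (global wave sync)
 * and OA (ordered append) registers.
 */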
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = {
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};

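/*
 * RLC save/restore register lists.  Reading of the layout (not spelled out
 * in this file): each entry appears to pack a GRBM_GFX_INDEX selector in the
 * upper 16 bits (0x0e00 broadcast, 0x4e00..0xbe00 per-instance) with a dword
 * register offset in the lower 16 bits, followed by a zero placeholder for
 * the saved value; the bare counts (0x3, 0x5) seem to introduce the list
 * segments that follow them.
 */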
static const u32 spectre_rlc_save_restore_register_list[] = {
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static const u32 kalindi_rlc_save_restore_register_list[] = {
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);

static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
}

/*
 * Core functions
 */
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   "amdgpu/%s_pfp.bin", chip_name);
	if (err)
		goto out;

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   "amdgpu/%s_me.bin", chip_name);
	if (err)
		goto out;

	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
				   "amdgpu/%s_ce.bin", chip_name);
	if (err)
		goto out;

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
					   "amdgpu/%s_mec2.bin", chip_name);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   "amdgpu/%s_rlc.bin", chip_name);
out:
	if (err) {
		pr_err("gfx7: Failed to load %s gfx firmware\n", chip_name);
		gfx_v7_0_free_microcode(adev);
	}
	return err;
}
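/*
 * Note on the error path above: gfx_v7_0_free_microcode() releases every
 * firmware pointer unconditionally, which should be safe even for images
 * that were never requested (releasing a NULL firmware is expected to be a
 * no-op).
 */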

/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	const u32 num_secondary_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
	u32 reg_offset, split_equal_to_row_size;
	uint32_t *tile, *macrotile;

	tile = adev->gfx.config.tile_mode_array;
	macrotile = adev->gfx.config.macrotile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}
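	/*
	 * split_equal_to_row_size simply maps the DRAM row size (1/2/4 KB)
	 * onto the matching ADDR_SURF_TILE_SPLIT_* encoding, presumably so
	 * a tile split never exceeds one memory row.
	 */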

	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
		tile[reg_offset] = 0;
	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
		macrotile[reg_offset] = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_HAWAII:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_MULLINS:
	default:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P2));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	}
}

1538 /**
1539  * gfx_v7_0_select_se_sh - select which SE, SH to address
1540  *
1541  * @adev: amdgpu_device pointer
1542  * @se_num: shader engine to address
1543  * @sh_num: sh block to address
1544  * @instance: Certain registers are instanced per SE or SH.
1545  *            0xffffffff means broadcast to all SEs or SHs (CIK).
1546  * @xcc_id: xcc accelerated compute core id
 *
 * Select which SE, SH combinations to address.
1548  */
1549 static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1550 				  u32 se_num, u32 sh_num, u32 instance,
1551 				  int xcc_id)
1552 {
1553 	u32 data;
1554 
1555 	if (instance == 0xffffffff)
1556 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1557 	else
1558 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1559 
1560 	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1561 		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1562 			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
1563 	else if (se_num == 0xffffffff)
1564 		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
1565 			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
1566 	else if (sh_num == 0xffffffff)
1567 		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1568 			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1569 	else
1570 		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
1571 			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1572 	WREG32(mmGRBM_GFX_INDEX, data);
1573 }
1574 
1575 /**
1576  * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1577  *
1578  * @adev: amdgpu_device pointer
1579  *
1580  * Calculates the bitmask of enabled RBs (CIK).
1581  * Returns the enabled RB bitmask.
1582  */
1583 static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1584 {
1585 	u32 data, mask;
1586 
1587 	data = RREG32(mmCC_RB_BACKEND_DISABLE);
1588 	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1589 
1590 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1591 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1592 
1593 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1594 					 adev->gfx.config.max_sh_per_se);
1595 
1596 	return (~data) & mask;
1597 }
1598 
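/*
 * Look up the default (fully populated) PA_SC_RASTER_CONFIG/_1 values
 * for the given ASIC; gfx_v7_0_setup_rb() adjusts these when render
 * backends have been harvested.
 */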
1599 static void
1600 gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1601 {
1602 	switch (adev->asic_type) {
1603 	case CHIP_BONAIRE:
1604 		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1605 			  SE_XSEL(1) | SE_YSEL(1);
1606 		*rconf1 |= 0x0;
1607 		break;
1608 	case CHIP_HAWAII:
1609 		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1610 			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1611 			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1612 			  SE_YSEL(3);
1613 		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1614 			   SE_PAIR_YSEL(2);
1615 		break;
1616 	case CHIP_KAVERI:
1617 		*rconf |= RB_MAP_PKR0(2);
1618 		*rconf1 |= 0x0;
1619 		break;
1620 	case CHIP_KABINI:
1621 	case CHIP_MULLINS:
1622 		*rconf |= 0x0;
1623 		*rconf1 |= 0x0;
1624 		break;
1625 	default:
1626 		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1627 		break;
1628 	}
1629 }
1630 
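/*
 * Rewrite the raster config per SE so that rasterization only targets
 * render backends present in rb_mask, remapping the SE, PKR and RB map
 * fields away from harvested RBs.
 */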
1631 static void
1632 gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1633 					u32 raster_config, u32 raster_config_1,
1634 					unsigned rb_mask, unsigned num_rb)
1635 {
1636 	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1637 	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1638 	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1639 	unsigned rb_per_se = num_rb / num_se;
1640 	unsigned se_mask[4];
1641 	unsigned se;
1642 
1643 	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1644 	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1645 	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1646 	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1647 
1648 	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1649 	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1650 	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1651 
1652 	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1653 			     (!se_mask[2] && !se_mask[3]))) {
1654 		raster_config_1 &= ~SE_PAIR_MAP_MASK;
1655 
1656 		if (!se_mask[0] && !se_mask[1]) {
1657 			raster_config_1 |=
1658 				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1659 		} else {
1660 			raster_config_1 |=
1661 				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1662 		}
1663 	}
1664 
1665 	for (se = 0; se < num_se; se++) {
1666 		unsigned raster_config_se = raster_config;
1667 		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1668 		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1669 		int idx = (se / 2) * 2;
1670 
1671 		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1672 			raster_config_se &= ~SE_MAP_MASK;
1673 
1674 			if (!se_mask[idx]) {
1675 				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1676 			} else {
1677 				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1678 			}
1679 		}
1680 
1681 		pkr0_mask &= rb_mask;
1682 		pkr1_mask &= rb_mask;
1683 		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1684 			raster_config_se &= ~PKR_MAP_MASK;
1685 
1686 			if (!pkr0_mask) {
1687 				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1688 			} else {
1689 				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1690 			}
1691 		}
1692 
1693 		if (rb_per_se >= 2) {
1694 			unsigned rb0_mask = 1 << (se * rb_per_se);
1695 			unsigned rb1_mask = rb0_mask << 1;
1696 
1697 			rb0_mask &= rb_mask;
1698 			rb1_mask &= rb_mask;
1699 			if (!rb0_mask || !rb1_mask) {
1700 				raster_config_se &= ~RB_MAP_PKR0_MASK;
1701 
1702 				if (!rb0_mask) {
1703 					raster_config_se |=
1704 						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1705 				} else {
1706 					raster_config_se |=
1707 						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1708 				}
1709 			}
1710 
1711 			if (rb_per_se > 2) {
1712 				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1713 				rb1_mask = rb0_mask << 1;
1714 				rb0_mask &= rb_mask;
1715 				rb1_mask &= rb_mask;
1716 				if (!rb0_mask || !rb1_mask) {
1717 					raster_config_se &= ~RB_MAP_PKR1_MASK;
1718 
1719 					if (!rb0_mask) {
1720 						raster_config_se |=
1721 							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1722 					} else {
1723 						raster_config_se |=
1724 							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1725 					}
1726 				}
1727 			}
1728 		}
1729 
1730 		/* GRBM_GFX_INDEX has a different offset on CI+ */
1731 		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
1732 		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1733 		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1734 	}
1735 
1736 	/* GRBM_GFX_INDEX has a different offset on CI+ */
1737 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1738 }
1739 
1740 /**
1741  * gfx_v7_0_setup_rb - setup the RBs on the asic
1742  *
1743  * @adev: amdgpu_device pointer
1744  *
1745  * Configures per-SE/SH RB registers (CIK).
1746  */
1747 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1748 {
1749 	int i, j;
1750 	u32 data;
1751 	u32 raster_config = 0, raster_config_1 = 0;
1752 	u32 active_rbs = 0;
1753 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1754 					adev->gfx.config.max_sh_per_se;
1755 	unsigned num_rb_pipes;
1756 
1757 	mutex_lock(&adev->grbm_idx_mutex);
1758 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1759 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1760 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1761 			data = gfx_v7_0_get_rb_active_bitmap(adev);
1762 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1763 					       rb_bitmap_width_per_sh);
1764 		}
1765 	}
1766 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1767 
1768 	adev->gfx.config.backend_enable_mask = active_rbs;
1769 	adev->gfx.config.num_rbs = hweight32(active_rbs);
1770 
1771 	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1772 			     adev->gfx.config.max_shader_engines, 16);
1773 
1774 	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1775 
1776 	if (!adev->gfx.config.backend_enable_mask ||
1777 			adev->gfx.config.num_rbs >= num_rb_pipes) {
1778 		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1779 		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1780 	} else {
1781 		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1782 							adev->gfx.config.backend_enable_mask,
1783 							num_rb_pipes);
1784 	}
1785 
1786 	/* cache the values for userspace */
1787 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1788 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1789 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1790 			adev->gfx.config.rb_config[i][j].rb_backend_disable =
1791 				RREG32(mmCC_RB_BACKEND_DISABLE);
1792 			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
1793 				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1794 			adev->gfx.config.rb_config[i][j].raster_config =
1795 				RREG32(mmPA_SC_RASTER_CONFIG);
1796 			adev->gfx.config.rb_config[i][j].raster_config_1 =
1797 				RREG32(mmPA_SC_RASTER_CONFIG_1);
1798 		}
1799 	}
1800 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1801 	mutex_unlock(&adev->grbm_idx_mutex);
1802 }
1803 
1804 #define DEFAULT_SH_MEM_BASES	(0x6000)
1805 /**
 * gfx_v7_0_init_compute_vmid - init compute vmid sh_mem registers
1807  *
1808  * @adev: amdgpu_device pointer
1809  *
 * Initialize the compute VMID sh_mem registers.
1812  */
1813 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1814 {
1815 	int i;
1816 	uint32_t sh_mem_config;
1817 	uint32_t sh_mem_bases;
1818 
1819 	/*
1820 	 * Configure apertures:
1821 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1822 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1823 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
1825 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1826 	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1827 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1828 	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
1829 	mutex_lock(&adev->srbm_mutex);
1830 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1831 		cik_srbm_select(adev, 0, 0, 0, i);
1832 		/* CP and shaders */
1833 		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
1834 		WREG32(mmSH_MEM_APE1_BASE, 1);
1835 		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1836 		WREG32(mmSH_MEM_BASES, sh_mem_bases);
1837 	}
1838 	cik_srbm_select(adev, 0, 0, 0, 0);
1839 	mutex_unlock(&adev->srbm_mutex);
1840 
	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access.  These should be enabled by FW for target VMIDs.
	 */
1843 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1844 		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
1845 		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
1846 		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
1847 		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
1848 	}
1849 }
1850 
1851 static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev)
1852 {
1853 	int vmid;
1854 
1855 	/*
1856 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1857 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1858 	 * the driver can enable them for graphics. VMID0 should maintain
1859 	 * access so that HWS firmware can save/restore entries.
1860 	 */
1861 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
1862 		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
1863 		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
1864 		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
1865 		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
1866 	}
1867 }
1868 
1869 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
1870 {
1871 	adev->gfx.config.double_offchip_lds_buf = 1;
1872 }
1873 
1874 /**
1875  * gfx_v7_0_constants_init - setup the 3D engine
1876  *
1877  * @adev: amdgpu_device pointer
1878  *
1879  * init the gfx constants such as the 3D engine, tiling configuration
1880  * registers, maximum number of quad pipes, render backends...
1881  */
1882 static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
1883 {
1884 	u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
1885 	u32 tmp;
1886 	int i;
1887 
1888 	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1889 
1890 	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1891 	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1892 	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1893 
1894 	gfx_v7_0_tiling_mode_table_init(adev);
1895 
1896 	gfx_v7_0_setup_rb(adev);
1897 	gfx_v7_0_get_cu_info(adev);
1898 	gfx_v7_0_config_init(adev);
1899 
1900 	/* set HW defaults for 3D engine */
1901 	WREG32(mmCP_MEQ_THRESHOLDS,
1902 	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1903 	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1904 
1905 	mutex_lock(&adev->grbm_idx_mutex);
1906 	/*
	 * make sure that the following register writes are broadcast
	 * to all the shaders
1909 	 */
1910 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1911 
1912 	/* XXX SH_MEM regs */
1913 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1914 	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1915 				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1916 	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
1917 				   MTYPE_NC);
1918 	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
1919 				   MTYPE_UC);
1920 	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
1921 
1922 	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
1923 				   SWIZZLE_ENABLE, 1);
1924 	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1925 				   ELEMENT_SIZE, 1);
1926 	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1927 				   INDEX_STRIDE, 3);
1928 	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
1929 
1930 	mutex_lock(&adev->srbm_mutex);
1931 	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
1932 		if (i == 0)
1933 			sh_mem_base = 0;
1934 		else
1935 			sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1936 		cik_srbm_select(adev, 0, 0, 0, i);
1937 		/* CP and shaders */
1938 		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1939 		WREG32(mmSH_MEM_APE1_BASE, 1);
1940 		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1941 		WREG32(mmSH_MEM_BASES, sh_mem_base);
1942 	}
1943 	cik_srbm_select(adev, 0, 0, 0, 0);
1944 	mutex_unlock(&adev->srbm_mutex);
1945 
1946 	gfx_v7_0_init_compute_vmid(adev);
1947 	gfx_v7_0_init_gds_vmid(adev);
1948 
1949 	WREG32(mmSX_DEBUG_1, 0x20);
1950 
1951 	WREG32(mmTA_CNTL_AUX, 0x00010000);
1952 
1953 	tmp = RREG32(mmSPI_CONFIG_CNTL);
1954 	tmp |= 0x03000000;
1955 	WREG32(mmSPI_CONFIG_CNTL, tmp);
1956 
1957 	WREG32(mmSQ_CONFIG, 1);
1958 
1959 	WREG32(mmDB_DEBUG, 0);
1960 
1961 	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
1962 	tmp |= 0x00000400;
1963 	WREG32(mmDB_DEBUG2, tmp);
1964 
1965 	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
1966 	tmp |= 0x00020200;
1967 	WREG32(mmDB_DEBUG3, tmp);
1968 
1969 	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
1970 	tmp |= 0x00018208;
1971 	WREG32(mmCB_HW_CONTROL, tmp);
1972 
1973 	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1974 
1975 	WREG32(mmPA_SC_FIFO_SIZE,
1976 		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1977 		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1978 		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1979 		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1980 
1981 	WREG32(mmVGT_NUM_INSTANCES, 1);
1982 
1983 	WREG32(mmCP_PERFMON_CNTL, 0);
1984 
1985 	WREG32(mmSQ_CONFIG, 0);
1986 
1987 	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
1988 		((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
1989 		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
1990 
1991 	WREG32(mmVGT_CACHE_INVALIDATION,
1992 		(VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
1993 		(ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
1994 
1995 	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
1996 	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
1997 
1998 	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
1999 			(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
2000 	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
2001 
2002 	tmp = RREG32(mmSPI_ARB_PRIORITY);
2003 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
2004 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
2005 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
2006 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
2007 	WREG32(mmSPI_ARB_PRIORITY, tmp);
2008 
2009 	mutex_unlock(&adev->grbm_idx_mutex);
2010 
2011 	udelay(50);
2012 }
2013 
2014 /**
2015  * gfx_v7_0_ring_test_ring - basic gfx ring test
2016  *
2017  * @ring: amdgpu_ring structure holding ring information
2018  *
 * Write a known value to a scratch register using the gfx ring and
 * poll until it reads back (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by gfx_v7_0_cp_gfx_resume().
2022  * Returns 0 on success, error on failure.
2023  */
2024 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2025 {
2026 	struct amdgpu_device *adev = ring->adev;
2027 	uint32_t tmp = 0;
2028 	unsigned i;
2029 	int r;
2030 
2031 	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
2032 	r = amdgpu_ring_alloc(ring, 3);
2033 	if (r)
2034 		return r;
2035 
2036 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2037 	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
2038 	amdgpu_ring_write(ring, 0xDEADBEEF);
2039 	amdgpu_ring_commit(ring);
2040 
2041 	for (i = 0; i < adev->usec_timeout; i++) {
2042 		tmp = RREG32(mmSCRATCH_REG0);
2043 		if (tmp == 0xDEADBEEF)
2044 			break;
2045 		udelay(1);
2046 	}
2047 	if (i >= adev->usec_timeout)
2048 		r = -ETIMEDOUT;
2049 	return r;
2050 }
2051 
2052 /**
2053  * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2054  *
2055  * @ring: amdgpu_ring structure holding ring information
2056  *
2057  * Emits an hdp flush on the cp.
2058  */
2059 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2060 {
2061 	u32 ref_and_mask;
2062 	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2063 
2064 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2065 		switch (ring->me) {
2066 		case 1:
2067 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2068 			break;
2069 		case 2:
2070 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2071 			break;
2072 		default:
2073 			return;
2074 		}
2075 	} else {
2076 		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2077 	}
2078 
2079 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2080 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2081 				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
2082 				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
2083 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2084 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2085 	amdgpu_ring_write(ring, ref_and_mask);
2086 	amdgpu_ring_write(ring, ref_and_mask);
2087 	amdgpu_ring_write(ring, 0x20); /* poll interval */
2088 }
2089 
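/* emit a VS partial flush followed by a VGT flush event */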
2090 static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2091 {
2092 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2093 	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2094 		EVENT_INDEX(4));
2095 
2096 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2097 	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2098 		EVENT_INDEX(0));
2099 }
2100 
2101 /**
2102  * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2103  *
2104  * @ring: amdgpu_ring structure holding ring information
 * @addr: GPU address to write the fence sequence number to
2106  * @seq: sequence number
2107  * @flags: fence related flags
2108  *
2109  * Emits a fence sequence number on the gfx ring and flushes
2110  * GPU caches.
2111  */
2112 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2113 					 u64 seq, unsigned flags)
2114 {
2115 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2116 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	/* Workaround for cache flush problems.  First send a dummy EOP
	 * event down the pipe with a sequence number one below the real one.
2119 	 */
2120 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2121 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2122 				 EOP_TC_ACTION_EN |
2123 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2124 				 EVENT_INDEX(5)));
2125 	amdgpu_ring_write(ring, addr & 0xfffffffc);
2126 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2127 				DATA_SEL(1) | INT_SEL(0));
2128 	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2129 	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2130 
2131 	/* Then send the real EOP event down the pipe. */
2132 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2133 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2134 				 EOP_TC_ACTION_EN |
2135 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2136 				 EVENT_INDEX(5)));
2137 	amdgpu_ring_write(ring, addr & 0xfffffffc);
2138 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2139 				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2140 	amdgpu_ring_write(ring, lower_32_bits(seq));
2141 	amdgpu_ring_write(ring, upper_32_bits(seq));
2142 }
2143 
2144 /**
2145  * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2146  *
2147  * @ring: amdgpu_ring structure holding ring information
 * @addr: GPU address to write the fence sequence number to
2149  * @seq: sequence number
2150  * @flags: fence related flags
2151  *
2152  * Emits a fence sequence number on the compute ring and flushes
2153  * GPU caches.
2154  */
2155 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2156 					     u64 addr, u64 seq,
2157 					     unsigned flags)
2158 {
2159 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2160 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2161 
2162 	/* RELEASE_MEM - flush caches, send int */
2163 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2164 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2165 				 EOP_TC_ACTION_EN |
2166 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2167 				 EVENT_INDEX(5)));
2168 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2169 	amdgpu_ring_write(ring, addr & 0xfffffffc);
2170 	amdgpu_ring_write(ring, upper_32_bits(addr));
2171 	amdgpu_ring_write(ring, lower_32_bits(seq));
2172 	amdgpu_ring_write(ring, upper_32_bits(seq));
2173 }
2174 
2175 /*
2176  * IB stuff
2177  */
2178 /**
2179  * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
2180  *
2181  * @ring: amdgpu_ring structure holding ring information
2182  * @job: job to retrieve vmid from
2183  * @ib: amdgpu indirect buffer object
2184  * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
2185  *
 * Emits a DE (drawing engine) or CE (constant engine) IB
2187  * on the gfx ring.  IBs are usually generated by userspace
2188  * acceleration drivers and submitted to the kernel for
2189  * scheduling on the ring.  This function schedules the IB
2190  * on the gfx ring for execution by the GPU.
2191  */
2192 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2193 					struct amdgpu_job *job,
2194 					struct amdgpu_ib *ib,
2195 					uint32_t flags)
2196 {
2197 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2198 	u32 header, control = 0;
2199 
2200 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
2201 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2202 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2203 		amdgpu_ring_write(ring, 0);
2204 	}
2205 
2206 	if (ib->flags & AMDGPU_IB_FLAG_CE)
2207 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2208 	else
2209 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2210 
2211 	control |= ib->length_dw | (vmid << 24);
2212 
2213 	amdgpu_ring_write(ring, header);
2214 	amdgpu_ring_write(ring,
2215 #ifdef __BIG_ENDIAN
2216 			  (2 << 0) |
2217 #endif
2218 			  (ib->gpu_addr & 0xFFFFFFFC));
2219 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2220 	amdgpu_ring_write(ring, control);
2221 }
2222 
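/**
 * gfx_v7_0_ring_emit_ib_compute - emit an IB on a compute ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @job: job to retrieve vmid from
 * @ib: amdgpu indirect buffer object
 * @flags: options
 *
 * Schedules an IB on a compute ring for execution by the GPU.
 */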
2223 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2224 					  struct amdgpu_job *job,
2225 					  struct amdgpu_ib *ib,
2226 					  uint32_t flags)
2227 {
2228 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2229 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2230 
	/* Currently, there is a high likelihood of getting a wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects.  This situation happens
2234 	 * randomly when at least 5 compute pipes use GDS ordered append.
2235 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2236 	 * Those are probably bugs somewhere else in the kernel driver.
2237 	 *
2238 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2239 	 * GDS to 0 for this ring (me/pipe).
2240 	 */
2241 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2242 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2243 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
2244 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2245 	}
2246 
2247 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2248 	amdgpu_ring_write(ring,
2249 #ifdef __BIG_ENDIAN
2250 					  (2 << 0) |
2251 #endif
2252 					  (ib->gpu_addr & 0xFFFFFFFC));
2253 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2254 	amdgpu_ring_write(ring, control);
2255 }
2256 
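/*
 * Emit a CONTEXT_CONTROL packet.  On a context switch this also flushes
 * the VGT and requests a reload of the global, per-context and shader
 * register state.
 */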
2257 static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2258 {
2259 	uint32_t dw2 = 0;
2260 
	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
2262 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2263 		gfx_v7_0_ring_emit_vgt_flush(ring);
2264 		/* set load_global_config & load_global_uconfig */
2265 		dw2 |= 0x8001;
2266 		/* set load_cs_sh_regs */
2267 		dw2 |= 0x01000000;
2268 		/* set load_per_context_state & load_gfx_sh_regs */
2269 		dw2 |= 0x10002;
2270 	}
2271 
2272 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2273 	amdgpu_ring_write(ring, dw2);
2274 	amdgpu_ring_write(ring, 0);
2275 }
2276 
2277 /**
2278  * gfx_v7_0_ring_test_ib - basic ring IB test
2279  *
2280  * @ring: amdgpu_ring structure holding ring information
2281  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
2282  *
2283  * Allocate an IB and execute it on the gfx ring (CIK).
2284  * Provides a basic gfx ring test to verify that IBs are working.
2285  * Returns 0 on success, error on failure.
2286  */
2287 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2288 {
2289 	struct amdgpu_device *adev = ring->adev;
2290 	struct amdgpu_ib ib;
2291 	struct dma_fence *f = NULL;
2292 	uint32_t tmp = 0;
2293 	long r;
2294 
2295 	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
2296 	memset(&ib, 0, sizeof(ib));
2297 	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
2298 	if (r)
2299 		return r;
2300 
2301 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2302 	ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START;
2303 	ib.ptr[2] = 0xDEADBEEF;
2304 	ib.length_dw = 3;
2305 
2306 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
2307 	if (r)
2308 		goto error;
2309 
2310 	r = dma_fence_wait_timeout(f, false, timeout);
2311 	if (r == 0) {
2312 		r = -ETIMEDOUT;
2313 		goto error;
2314 	} else if (r < 0) {
2315 		goto error;
2316 	}
2317 	tmp = RREG32(mmSCRATCH_REG0);
2318 	if (tmp == 0xDEADBEEF)
2319 		r = 0;
2320 	else
2321 		r = -EINVAL;
2322 
2323 error:
2324 	amdgpu_ib_free(adev, &ib, NULL);
2325 	dma_fence_put(f);
2326 	return r;
2327 }
2328 
2329 /*
2330  * CP.
2331  * On CIK, gfx and compute now have independent command processors.
2332  *
2333  * GFX
2334  * Gfx consists of a single ring and can process both gfx jobs and
2335  * compute jobs.  The gfx CP consists of three microengines (ME):
2336  * PFP - Pre-Fetch Parser
2337  * ME - Micro Engine
2338  * CE - Constant Engine
2339  * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
2341  * used by the DE so that they can be loaded into cache in parallel
2342  * while the DE is processing state update packets.
2343  *
2344  * Compute
2345  * The compute CP consists of two microengines (ME):
2346  * MEC1 - Compute MicroEngine 1
2347  * MEC2 - Compute MicroEngine 2
2348  * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2349  * The queues are exposed to userspace and are programmed directly
2350  * by the compute runtime.
2351  */
2352 /**
2353  * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2354  *
2355  * @adev: amdgpu_device pointer
2356  * @enable: enable or disable the MEs
2357  *
2358  * Halts or unhalts the gfx MEs.
2359  */
2360 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2361 {
2362 	if (enable)
2363 		WREG32(mmCP_ME_CNTL, 0);
2364 	else
2365 		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
2366 				      CP_ME_CNTL__PFP_HALT_MASK |
2367 				      CP_ME_CNTL__CE_HALT_MASK));
2368 	udelay(50);
2369 }
2370 
2371 /**
2372  * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2373  *
2374  * @adev: amdgpu_device pointer
2375  *
2376  * Loads the gfx PFP, ME, and CE ucode.
2377  * Returns 0 for success, -EINVAL if the ucode is not available.
2378  */
2379 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2380 {
2381 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2382 	const struct gfx_firmware_header_v1_0 *ce_hdr;
2383 	const struct gfx_firmware_header_v1_0 *me_hdr;
2384 	const __le32 *fw_data;
2385 	unsigned i, fw_size;
2386 
2387 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2388 		return -EINVAL;
2389 
2390 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2391 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2392 	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2393 
2394 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2395 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2396 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2397 	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2398 	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2399 	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2400 	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2401 	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2402 	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2403 
2404 	gfx_v7_0_cp_gfx_enable(adev, false);
2405 
2406 	/* PFP */
2407 	fw_data = (const __le32 *)
2408 		(adev->gfx.pfp_fw->data +
2409 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2410 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2411 	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2412 	for (i = 0; i < fw_size; i++)
2413 		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2414 	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2415 
2416 	/* CE */
2417 	fw_data = (const __le32 *)
2418 		(adev->gfx.ce_fw->data +
2419 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2420 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2421 	WREG32(mmCP_CE_UCODE_ADDR, 0);
2422 	for (i = 0; i < fw_size; i++)
2423 		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2424 	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2425 
2426 	/* ME */
2427 	fw_data = (const __le32 *)
2428 		(adev->gfx.me_fw->data +
2429 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2430 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2431 	WREG32(mmCP_ME_RAM_WADDR, 0);
2432 	for (i = 0; i < fw_size; i++)
2433 		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2434 	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2435 
2436 	return 0;
2437 }
2438 
2439 /**
2440  * gfx_v7_0_cp_gfx_start - start the gfx ring
2441  *
2442  * @adev: amdgpu_device pointer
2443  *
2444  * Enables the ring and loads the clear state context and other
2445  * packets required to init the ring.
2446  * Returns 0 for success, error for failure.
2447  */
2448 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2449 {
2450 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2451 	const struct cs_section_def *sect = NULL;
2452 	const struct cs_extent_def *ext = NULL;
2453 	int r, i;
2454 
2455 	/* init the CP */
2456 	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2457 	WREG32(mmCP_ENDIAN_SWAP, 0);
2458 	WREG32(mmCP_DEVICE_ID, 1);
2459 
2460 	gfx_v7_0_cp_gfx_enable(adev, true);
2461 
2462 	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2463 	if (r) {
2464 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2465 		return r;
2466 	}
2467 
2468 	/* init the CE partitions.  CE only used for gfx on CIK */
2469 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2470 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2471 	amdgpu_ring_write(ring, 0x8000);
2472 	amdgpu_ring_write(ring, 0x8000);
2473 
2474 	/* clear state buffer */
2475 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2476 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2477 
2478 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2479 	amdgpu_ring_write(ring, 0x80000000);
2480 	amdgpu_ring_write(ring, 0x80000000);
2481 
2482 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2483 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2484 			if (sect->id == SECT_CONTEXT) {
2485 				amdgpu_ring_write(ring,
2486 						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2487 				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2488 				for (i = 0; i < ext->reg_count; i++)
2489 					amdgpu_ring_write(ring, ext->extent[i]);
2490 			}
2491 		}
2492 	}
2493 
2494 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2495 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2496 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
2497 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
2498 
2499 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2500 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2501 
2502 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2503 	amdgpu_ring_write(ring, 0);
2504 
2505 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2506 	amdgpu_ring_write(ring, 0x00000316);
2507 	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2508 	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2509 
2510 	amdgpu_ring_commit(ring);
2511 
2512 	return 0;
2513 }
2514 
2515 /**
2516  * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2517  *
2518  * @adev: amdgpu_device pointer
2519  *
2520  * Program the location and size of the gfx ring buffer
2521  * and test it to make sure it's working.
2522  * Returns 0 for success, error for failure.
2523  */
2524 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2525 {
2526 	struct amdgpu_ring *ring;
2527 	u32 tmp;
2528 	u32 rb_bufsz;
2529 	u64 rb_addr, rptr_addr;
2530 	int r;
2531 
2532 	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2533 	if (adev->asic_type != CHIP_HAWAII)
2534 		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2535 
2536 	/* Set the write pointer delay */
2537 	WREG32(mmCP_RB_WPTR_DELAY, 0);
2538 
2539 	/* set the RB to use vmid 0 */
2540 	WREG32(mmCP_RB_VMID, 0);
2541 
2542 	WREG32(mmSCRATCH_ADDR, 0);
2543 
2544 	/* ring 0 - compute and gfx */
2545 	/* Set ring buffer size */
2546 	ring = &adev->gfx.gfx_ring[0];
2547 	rb_bufsz = order_base_2(ring->ring_size / 8);
2548 	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2549 #ifdef __BIG_ENDIAN
2550 	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2551 #endif
2552 	WREG32(mmCP_RB0_CNTL, tmp);
2553 
2554 	/* Initialize the ring buffer's read and write pointers */
2555 	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2556 	ring->wptr = 0;
2557 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2558 
	/* set the wb address whether it's enabled or not */
2560 	rptr_addr = ring->rptr_gpu_addr;
2561 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2562 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2563 
2564 	/* scratch register shadowing is no longer supported */
2565 	WREG32(mmSCRATCH_UMSK, 0);
2566 
2567 	mdelay(1);
2568 	WREG32(mmCP_RB0_CNTL, tmp);
2569 
2570 	rb_addr = ring->gpu_addr >> 8;
2571 	WREG32(mmCP_RB0_BASE, rb_addr);
2572 	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2573 
2574 	/* start the ring */
2575 	gfx_v7_0_cp_gfx_start(adev);
2576 	r = amdgpu_ring_test_helper(ring);
2577 	if (r)
2578 		return r;
2579 
2580 	return 0;
2581 }
2582 
2583 static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2584 {
2585 	return *ring->rptr_cpu_addr;
2586 }
2587 
2588 static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2589 {
2590 	struct amdgpu_device *adev = ring->adev;
2591 
2592 	return RREG32(mmCP_RB0_WPTR);
2593 }
2594 
2595 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2596 {
2597 	struct amdgpu_device *adev = ring->adev;
2598 
2599 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2600 	(void)RREG32(mmCP_RB0_WPTR);
2601 }
2602 
2603 static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2604 {
2605 	/* XXX check if swapping is necessary on BE */
2606 	return *ring->wptr_cpu_addr;
2607 }
2608 
2609 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2610 {
2611 	struct amdgpu_device *adev = ring->adev;
2612 
2613 	/* XXX check if swapping is necessary on BE */
2614 	*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
2615 	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2616 }
2617 
2618 /**
2619  * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2620  *
2621  * @adev: amdgpu_device pointer
2622  * @enable: enable or disable the MEs
2623  *
2624  * Halts or unhalts the compute MEs.
2625  */
2626 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2627 {
2628 	if (enable)
2629 		WREG32(mmCP_MEC_CNTL, 0);
2630 	else
2631 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2632 				       CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2633 	udelay(50);
2634 }
2635 
2636 /**
2637  * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2638  *
2639  * @adev: amdgpu_device pointer
2640  *
2641  * Loads the compute MEC1&2 ucode.
2642  * Returns 0 for success, -EINVAL if the ucode is not available.
2643  */
2644 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2645 {
2646 	const struct gfx_firmware_header_v1_0 *mec_hdr;
2647 	const __le32 *fw_data;
2648 	unsigned i, fw_size;
2649 
2650 	if (!adev->gfx.mec_fw)
2651 		return -EINVAL;
2652 
2653 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2654 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2655 	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2656 	adev->gfx.mec_feature_version = le32_to_cpu(
2657 					mec_hdr->ucode_feature_version);
2658 
2659 	gfx_v7_0_cp_compute_enable(adev, false);
2660 
2661 	/* MEC1 */
2662 	fw_data = (const __le32 *)
2663 		(adev->gfx.mec_fw->data +
2664 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2665 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2666 	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2667 	for (i = 0; i < fw_size; i++)
2668 		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2669 	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2670 
2671 	if (adev->asic_type == CHIP_KAVERI) {
2672 		const struct gfx_firmware_header_v1_0 *mec2_hdr;
2673 
2674 		if (!adev->gfx.mec2_fw)
2675 			return -EINVAL;
2676 
2677 		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2678 		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2679 		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2680 		adev->gfx.mec2_feature_version = le32_to_cpu(
2681 				mec2_hdr->ucode_feature_version);
2682 
2683 		/* MEC2 */
2684 		fw_data = (const __le32 *)
2685 			(adev->gfx.mec2_fw->data +
2686 			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2687 		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2688 		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2689 		for (i = 0; i < fw_size; i++)
2690 			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2691 		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2692 	}
2693 
2694 	return 0;
2695 }
2696 
2697 /**
2698  * gfx_v7_0_cp_compute_fini - stop the compute queues
2699  *
2700  * @adev: amdgpu_device pointer
2701  *
2702  * Stop the compute queues and tear down the driver queue
2703  * info.
2704  */
2705 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2706 {
2707 	int i;
2708 
2709 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2710 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2711 
2712 		amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
2713 	}
2714 }
2715 
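/* free the HPD EOP bo allocated by gfx_v7_0_mec_init() */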
2716 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2717 {
2718 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
2719 }
2720 
2721 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2722 {
2723 	int r;
2724 	u32 *hpd;
2725 	size_t mec_hpd_size;
2726 
2727 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2728 
2729 	/* take ownership of the relevant compute queues */
2730 	amdgpu_gfx_compute_queue_acquire(adev);
2731 
2732 	/* allocate space for ALL pipes (even the ones we don't own) */
2733 	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2734 		* GFX7_MEC_HPD_SIZE * 2;
2735 
2736 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2737 				      AMDGPU_GEM_DOMAIN_VRAM |
2738 				      AMDGPU_GEM_DOMAIN_GTT,
2739 				      &adev->gfx.mec.hpd_eop_obj,
2740 				      &adev->gfx.mec.hpd_eop_gpu_addr,
2741 				      (void **)&hpd);
2742 	if (r) {
		dev_warn(adev->dev, "(%d) create, pin or map of HPD EOP bo failed\n", r);
2744 		gfx_v7_0_mec_fini(adev);
2745 		return r;
2746 	}
2747 
	/* clear memory; not known to be strictly required, but it starts the EOP buffers in a known state */
2749 	memset(hpd, 0, mec_hpd_size);
2750 
2751 	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2752 	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2753 
2754 	return 0;
2755 }
2756 
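/*
 * Program the HPD EOP buffer base address, VMID and size for one compute
 * pipe.  @mec is zero-based here while the SRBM numbers the compute MEs
 * from 1, hence the mec + 1 in the select below.
 */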
2757 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
2758 				       int mec, int pipe)
2759 {
2760 	u64 eop_gpu_addr;
2761 	u32 tmp;
2762 	size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
2763 			    * GFX7_MEC_HPD_SIZE * 2;
2764 
2765 	mutex_lock(&adev->srbm_mutex);
2766 	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
2767 
2768 	cik_srbm_select(adev, mec + 1, pipe, 0, 0);
2769 
2770 	/* write the EOP addr */
2771 	WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2772 	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2773 
2774 	/* set the VMID assigned */
2775 	WREG32(mmCP_HPD_EOP_VMID, 0);
2776 
2777 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2778 	tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2779 	tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2780 	tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
2781 	WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2782 
2783 	cik_srbm_select(adev, 0, 0, 0, 0);
2784 	mutex_unlock(&adev->srbm_mutex);
2785 }
2786 
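/*
 * Request a dequeue on the HQD currently selected via SRBM and wait for
 * it to go idle.  Returns -ETIMEDOUT if the queue fails to deactivate.
 */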
2787 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
2788 {
2789 	int i;
2790 
2791 	/* disable the queue if it's active */
2792 	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
2793 		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
2794 		for (i = 0; i < adev->usec_timeout; i++) {
2795 			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
2796 				break;
2797 			udelay(1);
2798 		}
2799 
2800 		if (i == adev->usec_timeout)
2801 			return -ETIMEDOUT;
2802 
2803 		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
2804 		WREG32(mmCP_HQD_PQ_RPTR, 0);
2805 		WREG32(mmCP_HQD_PQ_WPTR, 0);
2806 	}
2807 
2808 	return 0;
2809 }
2810 
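/*
 * Fill the MQD (memory queue descriptor) with the ring's queue state:
 * MQD/HQD base addresses, doorbell setup and PQ control values that
 * mirror the CP_RB0_* programming used for the gfx ring.
 */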
2811 static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
2812 			     struct cik_mqd *mqd,
2813 			     uint64_t mqd_gpu_addr,
2814 			     struct amdgpu_ring *ring)
2815 {
2816 	u64 hqd_gpu_addr;
2817 	u64 wb_gpu_addr;
2818 
2819 	/* init the mqd struct */
2820 	memset(mqd, 0, sizeof(struct cik_mqd));
2821 
2822 	mqd->header = 0xC0310800;
2823 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2824 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2825 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2826 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2827 
2828 	/* enable doorbell? */
2829 	mqd->cp_hqd_pq_doorbell_control =
2830 		RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2831 	if (ring->use_doorbell)
2832 		mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2833 	else
2834 		mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2835 
2836 	/* set the pointer to the MQD */
2837 	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
2838 	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
2839 
2840 	/* set MQD vmid to 0 */
2841 	mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
2842 	mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
2843 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2845 	hqd_gpu_addr = ring->gpu_addr >> 8;
2846 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2847 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2848 
2849 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2850 	mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
2851 	mqd->cp_hqd_pq_control &=
2852 		~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
2853 				CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
2854 
2855 	mqd->cp_hqd_pq_control |=
2856 		order_base_2(ring->ring_size / 8);
2857 	mqd->cp_hqd_pq_control |=
2858 		(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
2859 #ifdef __BIG_ENDIAN
2860 	mqd->cp_hqd_pq_control |=
2861 		2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
2862 #endif
2863 	mqd->cp_hqd_pq_control &=
2864 		~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
2865 				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
2866 				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
2867 	mqd->cp_hqd_pq_control |=
2868 		CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
2869 		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
2870 
	/* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 */
2872 	wb_gpu_addr = ring->wptr_gpu_addr;
2873 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2874 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2875 
	/* set the wb address whether it's enabled or not */
2877 	wb_gpu_addr = ring->rptr_gpu_addr;
2878 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2879 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2880 		upper_32_bits(wb_gpu_addr) & 0xffff;
2881 
2882 	/* enable the doorbell if requested */
2883 	if (ring->use_doorbell) {
2884 		mqd->cp_hqd_pq_doorbell_control =
2885 			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2886 		mqd->cp_hqd_pq_doorbell_control &=
2887 			~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
2888 		mqd->cp_hqd_pq_doorbell_control |=
2889 			(ring->doorbell_index <<
2890 			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
2891 		mqd->cp_hqd_pq_doorbell_control |=
2892 			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2893 		mqd->cp_hqd_pq_doorbell_control &=
2894 			~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
2895 					CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
2896 
2897 	} else {
2898 		mqd->cp_hqd_pq_doorbell_control = 0;
2899 	}
2900 
2901 	/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2902 	ring->wptr = 0;
2903 	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
2904 	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
2905 
2906 	/* set the vmid for the queue */
2907 	mqd->cp_hqd_vmid = 0;
2908 
2909 	/* defaults */
2910 	mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
2911 	mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
2912 	mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
2913 	mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
2914 	mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
2915 	mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
2916 	mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
2917 	mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
2918 	mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
2919 	mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
2920 	mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
2921 	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
2922 	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
2923 	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
2924 	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
2925 	mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);
2926 
2927 	/* activate the queue */
2928 	mqd->cp_hqd_active = 1;
2929 }
2930 
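/*
 * Write the prepared MQD contents into the currently selected HQD
 * registers; CP_HQD_ACTIVE is written last to activate the queue.
 */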
2931 static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
2932 {
2933 	uint32_t tmp;
2934 	uint32_t mqd_reg;
2935 	uint32_t *mqd_data;
2936 
2937 	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
2938 	mqd_data = &mqd->cp_mqd_base_addr_lo;
2939 
2940 	/* disable wptr polling */
2941 	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
2942 	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2943 	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
2944 
2945 	/* program all HQD registers */
2946 	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
2947 		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
2948 
2949 	/* activate the HQD */
2950 	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
2951 		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
2952 
2953 	return 0;
2954 }
2955 
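/*
 * Allocate and map an MQD bo for one compute ring, then initialize,
 * deactivate and commit the queue under the SRBM mutex.
 */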
2956 static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
2957 {
2958 	int r;
2959 	u64 mqd_gpu_addr;
2960 	struct cik_mqd *mqd;
2961 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2962 
2963 	r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
2964 				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
2965 				      &mqd_gpu_addr, (void **)&mqd);
2966 	if (r) {
2967 		dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2968 		return r;
2969 	}
2970 
2971 	mutex_lock(&adev->srbm_mutex);
2972 	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2973 
2974 	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
2975 	gfx_v7_0_mqd_deactivate(adev);
2976 	gfx_v7_0_mqd_commit(adev, mqd);
2977 
2978 	cik_srbm_select(adev, 0, 0, 0, 0);
2979 	mutex_unlock(&adev->srbm_mutex);
2980 
2981 	amdgpu_bo_kunmap(ring->mqd_obj);
2982 	amdgpu_bo_unreserve(ring->mqd_obj);
2983 	return 0;
2984 }
2985 
2986 /**
2987  * gfx_v7_0_cp_compute_resume - setup the compute queue registers
2988  *
2989  * @adev: amdgpu_device pointer
2990  *
2991  * Program the compute queues and test them to make sure they
2992  * are working.
2993  * Returns 0 for success, error for failure.
2994  */
2995 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
2996 {
2997 	int r, i, j;
2998 	u32 tmp;
2999 	struct amdgpu_ring *ring;
3000 
3001 	/* fix up chicken bits */
3002 	tmp = RREG32(mmCP_CPF_DEBUG);
3003 	tmp |= (1 << 23);
3004 	WREG32(mmCP_CPF_DEBUG, tmp);
3005 
3006 	/* init all pipes (even the ones we don't own) */
3007 	for (i = 0; i < adev->gfx.mec.num_mec; i++)
3008 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
3009 			gfx_v7_0_compute_pipe_init(adev, i, j);
3010 
3011 	/* init the queues */
3012 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3013 		r = gfx_v7_0_compute_queue_init(adev, i);
3014 		if (r) {
3015 			gfx_v7_0_cp_compute_fini(adev);
3016 			return r;
3017 		}
3018 	}
3019 
3020 	gfx_v7_0_cp_compute_enable(adev, true);
3021 
3022 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3023 		ring = &adev->gfx.compute_ring[i];
3024 		amdgpu_ring_test_helper(ring);
3025 	}
3026 
3027 	return 0;
3028 }
3029 
3030 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3031 {
3032 	gfx_v7_0_cp_gfx_enable(adev, enable);
3033 	gfx_v7_0_cp_compute_enable(adev, enable);
3034 }
3035 
3036 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3037 {
3038 	int r;
3039 
3040 	r = gfx_v7_0_cp_gfx_load_microcode(adev);
3041 	if (r)
3042 		return r;
3043 	r = gfx_v7_0_cp_compute_load_microcode(adev);
3044 	if (r)
3045 		return r;
3046 
3047 	return 0;
3048 }
3049 
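/* enable/disable the CNTX_BUSY and CNTX_EMPTY (gui idle) interrupts on ring 0 */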
3050 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3051 					       bool enable)
3052 {
3053 	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3054 
3055 	if (enable)
3056 		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3057 				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3058 	else
3059 		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3060 				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3061 	WREG32(mmCP_INT_CNTL_RING0, tmp);
3062 }
3063 
3064 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3065 {
3066 	int r;
3067 
3068 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3069 
3070 	r = gfx_v7_0_cp_load_microcode(adev);
3071 	if (r)
3072 		return r;
3073 
3074 	r = gfx_v7_0_cp_gfx_resume(adev);
3075 	if (r)
3076 		return r;
3077 	r = gfx_v7_0_cp_compute_resume(adev);
3078 	if (r)
3079 		return r;
3080 
3081 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3082 
3083 	return 0;
3084 }
3085 
3086 /**
 * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
3088  *
3089  * @ring: the ring to emit the commands to
3090  *
 * Sync the command pipeline with the PFP, i.e. wait for everything
3092  * to be completed.
3093  */
3094 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3095 {
3096 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3097 	uint32_t seq = ring->fence_drv.sync_seq;
3098 	uint64_t addr = ring->fence_drv.gpu_addr;
3099 
3100 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3101 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3102 				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3103 				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
3104 	amdgpu_ring_write(ring, addr & 0xfffffffc);
3105 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3106 	amdgpu_ring_write(ring, seq);
3107 	amdgpu_ring_write(ring, 0xffffffff);
3108 	amdgpu_ring_write(ring, 4); /* poll interval */
3109 
3110 	if (usepfp) {
		/* sync CE with ME to prevent CE from fetching the CEIB before the context switch completes */
3112 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3113 		amdgpu_ring_write(ring, 0);
3114 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3115 		amdgpu_ring_write(ring, 0);
3116 	}
3117 }
3118 
3119 /*
3120  * vm
3121  * VMID 0 is the physical GPU addresses as used by the kernel.
3122  * VMIDs 1-15 are used for userspace clients and are handled
3123  * by the amdgpu vm/hsa code.
3124  */
3125 /**
3126  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3127  *
3128  * @ring: amdgpu_ring pointer
3129  * @vmid: vmid number to use
 * @pd_addr: page directory base address
3131  *
3132  * Update the page table base and flush the VM TLB
3133  * using the CP (CIK).
3134  */
3135 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3136 					unsigned vmid, uint64_t pd_addr)
3137 {
3138 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3139 
3140 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3141 
3142 	/* wait for the invalidate to complete */
3143 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3144 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3145 				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
3146 				 WAIT_REG_MEM_ENGINE(0))); /* me */
3147 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3148 	amdgpu_ring_write(ring, 0);
3149 	amdgpu_ring_write(ring, 0); /* ref */
3150 	amdgpu_ring_write(ring, 0); /* mask */
3151 	amdgpu_ring_write(ring, 0x20); /* poll interval */
3152 
3153 	/* compute doesn't have PFP */
3154 	if (usepfp) {
3155 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3156 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3157 		amdgpu_ring_write(ring, 0x0);
3158 
3159 		/* sync CE with ME to prevent the CE from fetching the CEIB before the context switch is done */
3160 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3161 		amdgpu_ring_write(ring, 0);
3162 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3163 		amdgpu_ring_write(ring, 0);
3164 	}
3165 }
3166 
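/**
 * gfx_v7_0_ring_emit_wreg - emit a register write from the ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit a WRITE_DATA packet that programs @reg with @val, using the
 * PFP on gfx rings and the ME on compute rings (CIK).
 */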
3167 static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
3168 				    uint32_t reg, uint32_t val)
3169 {
3170 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3171 
3172 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3173 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3174 				 WRITE_DATA_DST_SEL(0)));
3175 	amdgpu_ring_write(ring, reg);
3176 	amdgpu_ring_write(ring, 0);
3177 	amdgpu_ring_write(ring, val);
3178 }
3179 
3180 /*
3181  * RLC
3182  * The RLC is a multi-purpose microengine that handles a
3183  * variety of functions.
3184  */
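/**
 * gfx_v7_0_rlc_init - initialize the RLC buffers
 *
 * @adev: amdgpu_device pointer
 *
 * Select the register save/restore list (APUs only) and allocate the
 * save/restore, clear state, and CP jump table buffers used for
 * RLC power gating (CIK).
 * Returns 0 for success, error on failure.
 */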
3185 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3186 {
3187 	const u32 *src_ptr;
3188 	u32 dws;
3189 	const struct cs_section_def *cs_data;
3190 	int r;
3191 
3192 	/* allocate rlc buffers */
3193 	if (adev->flags & AMD_IS_APU) {
3194 		if (adev->asic_type == CHIP_KAVERI) {
3195 			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3196 			adev->gfx.rlc.reg_list_size =
3197 				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3198 		} else {
3199 			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3200 			adev->gfx.rlc.reg_list_size =
3201 				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3202 		}
3203 	}
3204 	adev->gfx.rlc.cs_data = ci_cs_data;
3205 	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3206 	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3207 
3208 	src_ptr = adev->gfx.rlc.reg_list;
3209 	dws = adev->gfx.rlc.reg_list_size;
3210 	dws += (5 * 16) + 48 + 48 + 64;
3211 
3212 	cs_data = adev->gfx.rlc.cs_data;
3213 
3214 	if (src_ptr) {
3215 		/* init save restore block */
3216 		r = amdgpu_gfx_rlc_init_sr(adev, dws);
3217 		if (r)
3218 			return r;
3219 	}
3220 
3221 	if (cs_data) {
3222 		/* init clear state block */
3223 		r = amdgpu_gfx_rlc_init_csb(adev);
3224 		if (r)
3225 			return r;
3226 	}
3227 
3228 	if (adev->gfx.rlc.cp_table_size) {
3229 		r = amdgpu_gfx_rlc_init_cpt(adev);
3230 		if (r)
3231 			return r;
3232 	}
3233 
3234 	/* init spm vmid with 0xf */
3235 	if (adev->gfx.rlc.funcs->update_spm_vmid)
3236 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
3237 
3238 	return 0;
3239 }
3240 
3241 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3242 {
3243 	u32 tmp;
3244 
3245 	tmp = RREG32(mmRLC_LB_CNTL);
3246 	if (enable)
3247 		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3248 	else
3249 		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3250 	WREG32(mmRLC_LB_CNTL, tmp);
3251 }
3252 
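/*
 * Poll the RLC SERDES CU master busy registers for every SE/SH, then the
 * non-CU masters, waiting for them to go idle or for the timeout to expire.
 */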
3253 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3254 {
3255 	u32 i, j, k;
3256 	u32 mask;
3257 
3258 	mutex_lock(&adev->grbm_idx_mutex);
3259 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3260 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3261 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3262 			for (k = 0; k < adev->usec_timeout; k++) {
3263 				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3264 					break;
3265 				udelay(1);
3266 			}
3267 		}
3268 	}
3269 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3270 	mutex_unlock(&adev->grbm_idx_mutex);
3271 
3272 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3273 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3274 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3275 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3276 	for (k = 0; k < adev->usec_timeout; k++) {
3277 		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3278 			break;
3279 		udelay(1);
3280 	}
3281 }
3282 
3283 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3284 {
3285 	u32 tmp;
3286 
3287 	tmp = RREG32(mmRLC_CNTL);
3288 	if (tmp != rlc)
3289 		WREG32(mmRLC_CNTL, rlc);
3290 }
3291 
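/*
 * Halt the RLC F32 core if it is running and wait for it and the SERDES
 * masters to go idle.  Returns the previous RLC_CNTL value so the caller
 * can restore it with gfx_v7_0_update_rlc().
 */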
3292 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3293 {
3294 	u32 data, orig;
3295 
3296 	orig = data = RREG32(mmRLC_CNTL);
3297 
3298 	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3299 		u32 i;
3300 
3301 		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3302 		WREG32(mmRLC_CNTL, data);
3303 
3304 		for (i = 0; i < adev->usec_timeout; i++) {
3305 			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3306 				break;
3307 			udelay(1);
3308 		}
3309 
3310 		gfx_v7_0_wait_for_rlc_serdes(adev);
3311 	}
3312 
3313 	return orig;
3314 }
3315 
3316 static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
3317 {
3318 	return true;
3319 }
3320 
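/*
 * Request RLC safe mode: write the request bit plus a message of 1
 * (enter safe mode) to RLC_GPR_REG2, wait for the gfx power and clock
 * status to assert, then wait for the RLC to ack by clearing the
 * request bit.
 */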
3321 static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
3322 {
3323 	u32 tmp, i, mask;
3324 
3325 	tmp = 0x1 | (1 << 1);
3326 	WREG32(mmRLC_GPR_REG2, tmp);
3327 
3328 	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3329 		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3330 	for (i = 0; i < adev->usec_timeout; i++) {
3331 		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3332 			break;
3333 		udelay(1);
3334 	}
3335 
3336 	for (i = 0; i < adev->usec_timeout; i++) {
3337 		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3338 			break;
3339 		udelay(1);
3340 	}
3341 }
3342 
3343 static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
3344 {
3345 	u32 tmp;
3346 
3347 	tmp = 0x1 | (0 << 1);
3348 	WREG32(mmRLC_GPR_REG2, tmp);
3349 }
3350 
3351 /**
3352  * gfx_v7_0_rlc_stop - stop the RLC ME
3353  *
3354  * @adev: amdgpu_device pointer
3355  *
3356  * Halt the RLC ME (MicroEngine) (CIK).
3357  */
3358 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3359 {
3360 	WREG32(mmRLC_CNTL, 0);
3361 
3362 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3363 
3364 	gfx_v7_0_wait_for_rlc_serdes(adev);
3365 }
3366 
3367 /**
3368  * gfx_v7_0_rlc_start - start the RLC ME
3369  *
3370  * @adev: amdgpu_device pointer
3371  *
3372  * Unhalt the RLC ME (MicroEngine) (CIK).
3373  */
3374 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3375 {
3376 	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3377 
3378 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3379 
3380 	udelay(50);
3381 }
3382 
3383 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3384 {
3385 	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3386 
3387 	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3388 	WREG32(mmGRBM_SOFT_RESET, tmp);
3389 	udelay(50);
3390 	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3391 	WREG32(mmGRBM_SOFT_RESET, tmp);
3392 	udelay(50);
3393 }
3394 
3395 /**
3396  * gfx_v7_0_rlc_resume - setup the RLC hw
3397  *
3398  * @adev: amdgpu_device pointer
3399  *
3400  * Initialize the RLC registers, load the ucode,
3401  * and start the RLC (CIK).
3402  * Returns 0 for success, -EINVAL if the ucode is not available.
3403  */
3404 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3405 {
3406 	const struct rlc_firmware_header_v1_0 *hdr;
3407 	const __le32 *fw_data;
3408 	unsigned i, fw_size;
3409 	u32 tmp;
3410 
3411 	if (!adev->gfx.rlc_fw)
3412 		return -EINVAL;
3413 
3414 	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3415 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3416 	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3417 	adev->gfx.rlc_feature_version = le32_to_cpu(
3418 					hdr->ucode_feature_version);
3419 
3420 	adev->gfx.rlc.funcs->stop(adev);
3421 
3422 	/* disable CG */
3423 	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3424 	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3425 
3426 	adev->gfx.rlc.funcs->reset(adev);
3427 
3428 	gfx_v7_0_init_pg(adev);
3429 
3430 	WREG32(mmRLC_LB_CNTR_INIT, 0);
3431 	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3432 
3433 	mutex_lock(&adev->grbm_idx_mutex);
3434 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3435 	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3436 	WREG32(mmRLC_LB_PARAMS, 0x00600408);
3437 	WREG32(mmRLC_LB_CNTL, 0x80000004);
3438 	mutex_unlock(&adev->grbm_idx_mutex);
3439 
3440 	WREG32(mmRLC_MC_CNTL, 0);
3441 	WREG32(mmRLC_UCODE_CNTL, 0);
3442 
3443 	fw_data = (const __le32 *)
3444 		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3445 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3446 	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3447 	for (i = 0; i < fw_size; i++)
3448 		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3449 	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3450 
3451 	/* XXX - find out what chips support lbpw */
3452 	gfx_v7_0_enable_lbpw(adev, false);
3453 
3454 	if (adev->asic_type == CHIP_BONAIRE)
3455 		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3456 
3457 	adev->gfx.rlc.funcs->start(adev);
3458 
3459 	return 0;
3460 }
3461 
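/*
 * Select the VMID the RLC gathers streaming performance monitor (SPM)
 * data for.  GFXOFF is held off while the register is updated.
 */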
3462 static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
3463 {
3464 	u32 data;
3465 
3466 	amdgpu_gfx_off_ctrl(adev, false);
3467 
3468 	data = RREG32(mmRLC_SPM_VMID);
3469 
3470 	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
3471 	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
3472 
3473 	WREG32(mmRLC_SPM_VMID, data);
3474 
3475 	amdgpu_gfx_off_ctrl(adev, true);
3476 }
3477 
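/*
 * Enable or disable coarse grain clock gating (CGCG) and coarse grain
 * light sleep (CGLS).  The RLC is halted while the SERDES BPM registers
 * are reprogrammed and restarted before RLC_CGCG_CGLS_CTRL is updated.
 */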
3478 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3479 {
3480 	u32 data, orig, tmp, tmp2;
3481 
3482 	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3483 
3484 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3485 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3486 
3487 		tmp = gfx_v7_0_halt_rlc(adev);
3488 
3489 		mutex_lock(&adev->grbm_idx_mutex);
3490 		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3491 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3492 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3493 		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3494 			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3495 			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3496 		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3497 		mutex_unlock(&adev->grbm_idx_mutex);
3498 
3499 		gfx_v7_0_update_rlc(adev, tmp);
3500 
3501 		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3502 		if (orig != data)
3503 			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3504 
3505 	} else {
3506 		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3507 
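		/* dummy reads, presumably to let the CB clock gating change take effect */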
3508 		RREG32(mmCB_CGTT_SCLK_CTRL);
3509 		RREG32(mmCB_CGTT_SCLK_CTRL);
3510 		RREG32(mmCB_CGTT_SCLK_CTRL);
3511 		RREG32(mmCB_CGTT_SCLK_CTRL);
3512 
3513 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3514 		if (orig != data)
3515 			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3516 
3517 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3518 	}
3519 }
3520 
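/*
 * Enable or disable medium grain clock gating (MGCG), together with the
 * associated CP/RLC memory light sleep and CGTS SM control settings.
 */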
3521 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3522 {
3523 	u32 data, orig, tmp = 0;
3524 
3525 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3526 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3527 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3528 				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3529 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3530 				if (orig != data)
3531 					WREG32(mmCP_MEM_SLP_CNTL, data);
3532 			}
3533 		}
3534 
3535 		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3536 		data |= 0x00000001;
3537 		data &= 0xfffffffd;
3538 		if (orig != data)
3539 			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3540 
3541 		tmp = gfx_v7_0_halt_rlc(adev);
3542 
3543 		mutex_lock(&adev->grbm_idx_mutex);
3544 		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3545 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3546 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3547 		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3548 			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3549 		WREG32(mmRLC_SERDES_WR_CTRL, data);
3550 		mutex_unlock(&adev->grbm_idx_mutex);
3551 
3552 		gfx_v7_0_update_rlc(adev, tmp);
3553 
3554 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3555 			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3556 			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3557 			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3558 			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3559 			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3560 			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3561 			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3562 				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3563 			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3564 			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3565 			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3566 			if (orig != data)
3567 				WREG32(mmCGTS_SM_CTRL_REG, data);
3568 		}
3569 	} else {
3570 		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3571 		data |= 0x00000003;
3572 		if (orig != data)
3573 			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3574 
3575 		data = RREG32(mmRLC_MEM_SLP_CNTL);
3576 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3577 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3578 			WREG32(mmRLC_MEM_SLP_CNTL, data);
3579 		}
3580 
3581 		data = RREG32(mmCP_MEM_SLP_CNTL);
3582 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3583 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3584 			WREG32(mmCP_MEM_SLP_CNTL, data);
3585 		}
3586 
3587 		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3588 		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3589 		if (orig != data)
3590 			WREG32(mmCGTS_SM_CTRL_REG, data);
3591 
3592 		tmp = gfx_v7_0_halt_rlc(adev);
3593 
3594 		mutex_lock(&adev->grbm_idx_mutex);
3595 		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3596 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3597 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3598 		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3599 		WREG32(mmRLC_SERDES_WR_CTRL, data);
3600 		mutex_unlock(&adev->grbm_idx_mutex);
3601 
3602 		gfx_v7_0_update_rlc(adev, tmp);
3603 	}
3604 }
3605 
3606 static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3607 			       bool enable)
3608 {
3609 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3610 	/* order matters! */
3611 	if (enable) {
3612 		gfx_v7_0_enable_mgcg(adev, true);
3613 		gfx_v7_0_enable_cgcg(adev, true);
3614 	} else {
3615 		gfx_v7_0_enable_cgcg(adev, false);
3616 		gfx_v7_0_enable_mgcg(adev, false);
3617 	}
3618 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3619 }
3620 
3621 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3622 						bool enable)
3623 {
3624 	u32 data, orig;
3625 
3626 	orig = data = RREG32(mmRLC_PG_CNTL);
3627 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3628 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3629 	else
3630 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3631 	if (orig != data)
3632 		WREG32(mmRLC_PG_CNTL, data);
3633 }
3634 
3635 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3636 						bool enable)
3637 {
3638 	u32 data, orig;
3639 
3640 	orig = data = RREG32(mmRLC_PG_CNTL);
3641 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3642 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3643 	else
3644 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3645 	if (orig != data)
3646 		WREG32(mmRLC_PG_CNTL, data);
3647 }
3648 
3649 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3650 {
3651 	u32 data, orig;
3652 
3653 	orig = data = RREG32(mmRLC_PG_CNTL);
3654 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3655 		data &= ~0x8000;
3656 	else
3657 		data |= 0x8000;
3658 	if (orig != data)
3659 		WREG32(mmRLC_PG_CNTL, data);
3660 }
3661 
3662 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3663 {
3664 	u32 data, orig;
3665 
3666 	orig = data = RREG32(mmRLC_PG_CNTL);
3667 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3668 		data &= ~0x2000;
3669 	else
3670 		data |= 0x2000;
3671 	if (orig != data)
3672 		WREG32(mmRLC_PG_CNTL, data);
3673 }
3674 
3675 static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
3676 {
3677 	if (adev->asic_type == CHIP_KAVERI)
3678 		return 5;
3679 	else
3680 		return 4;
3681 }
3682 
3683 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3684 				     bool enable)
3685 {
3686 	u32 data, orig;
3687 
3688 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3689 		orig = data = RREG32(mmRLC_PG_CNTL);
3690 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3691 		if (orig != data)
3692 			WREG32(mmRLC_PG_CNTL, data);
3693 
3694 		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3695 		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3696 		if (orig != data)
3697 			WREG32(mmRLC_AUTO_PG_CTRL, data);
3698 	} else {
3699 		orig = data = RREG32(mmRLC_PG_CNTL);
3700 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3701 		if (orig != data)
3702 			WREG32(mmRLC_PG_CNTL, data);
3703 
3704 		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3705 		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3706 		if (orig != data)
3707 			WREG32(mmRLC_AUTO_PG_CTRL, data);
3708 
3709 		data = RREG32(mmDB_RENDER_CONTROL);
3710 	}
3711 }
3712 
3713 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3714 						 u32 bitmap)
3715 {
3716 	u32 data;
3717 
3718 	if (!bitmap)
3719 		return;
3720 
3721 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3722 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3723 
3724 	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3725 }
3726 
3727 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3728 {
3729 	u32 data, mask;
3730 
3731 	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
3732 	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
3733 
3734 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3735 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3736 
3737 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3738 
3739 	return (~data) & mask;
3740 }
3741 
3742 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3743 {
3744 	u32 tmp;
3745 
3746 	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3747 
3748 	tmp = RREG32(mmRLC_MAX_PG_CU);
3749 	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
3750 	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3751 	WREG32(mmRLC_MAX_PG_CU, tmp);
3752 }
3753 
3754 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3755 					    bool enable)
3756 {
3757 	u32 data, orig;
3758 
3759 	orig = data = RREG32(mmRLC_PG_CNTL);
3760 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3761 		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3762 	else
3763 		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3764 	if (orig != data)
3765 		WREG32(mmRLC_PG_CNTL, data);
3766 }
3767 
3768 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3769 					     bool enable)
3770 {
3771 	u32 data, orig;
3772 
3773 	orig = data = RREG32(mmRLC_PG_CNTL);
3774 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3775 		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3776 	else
3777 		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3778 	if (orig != data)
3779 		WREG32(mmRLC_PG_CNTL, data);
3780 }
3781 
3782 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
3783 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
3784 
3785 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3786 {
3787 	u32 data, orig;
3788 	u32 i;
3789 
3790 	if (adev->gfx.rlc.cs_data) {
3791 		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3792 		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3793 		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3794 		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3795 	} else {
3796 		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3797 		for (i = 0; i < 3; i++)
3798 			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
3799 	}
3800 	if (adev->gfx.rlc.reg_list) {
3801 		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
3802 		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3803 			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3804 	}
3805 
3806 	orig = data = RREG32(mmRLC_PG_CNTL);
3807 	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
3808 	if (orig != data)
3809 		WREG32(mmRLC_PG_CNTL, data);
3810 
3811 	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3812 	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3813 
3814 	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3815 	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3816 	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3817 	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
3818 
3819 	data = 0x10101010;
3820 	WREG32(mmRLC_PG_DELAY, data);
3821 
3822 	data = RREG32(mmRLC_PG_DELAY_2);
3823 	data &= ~0xff;
3824 	data |= 0x3;
3825 	WREG32(mmRLC_PG_DELAY_2, data);
3826 
3827 	data = RREG32(mmRLC_AUTO_PG_CTRL);
3828 	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
3829 	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
3830 	WREG32(mmRLC_AUTO_PG_CTRL, data);
3831 
3833 
3834 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3835 {
3836 	gfx_v7_0_enable_gfx_cgpg(adev, enable);
3837 	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3838 	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
3839 }
3840 
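/*
 * Return the size, in dwords, of the clear state buffer emitted by
 * gfx_v7_0_get_csb_buffer(): the clear state preamble, context control,
 * the SECT_CONTEXT extents, PA_SC_RASTER_CONFIG/1, and the trailing
 * preamble end and CLEAR_STATE packets.
 */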
3841 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3842 {
3843 	u32 count = 0;
3844 	const struct cs_section_def *sect = NULL;
3845 	const struct cs_extent_def *ext = NULL;
3846 
3847 	if (adev->gfx.rlc.cs_data == NULL)
3848 		return 0;
3849 
3850 	/* begin clear state */
3851 	count += 2;
3852 	/* context control state */
3853 	count += 3;
3854 
3855 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3856 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3857 			if (sect->id == SECT_CONTEXT)
3858 				count += 2 + ext->reg_count;
3859 			else
3860 				return 0;
3861 		}
3862 	}
3863 	/* pa_sc_raster_config/pa_sc_raster_config1 */
3864 	count += 4;
3865 	/* end clear state */
3866 	count += 2;
3867 	/* clear state */
3868 	count += 2;
3869 
3870 	return count;
3871 }
3872 
3873 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
3874 				    volatile u32 *buffer)
3875 {
3876 	u32 count = 0, i;
3877 	const struct cs_section_def *sect = NULL;
3878 	const struct cs_extent_def *ext = NULL;
3879 
3880 	if (adev->gfx.rlc.cs_data == NULL)
3881 		return;
3882 	if (buffer == NULL)
3883 		return;
3884 
3885 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3886 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3887 
3888 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3889 	buffer[count++] = cpu_to_le32(0x80000000);
3890 	buffer[count++] = cpu_to_le32(0x80000000);
3891 
3892 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3893 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3894 			if (sect->id == SECT_CONTEXT) {
3895 				buffer[count++] =
3896 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
3897 				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3898 				for (i = 0; i < ext->reg_count; i++)
3899 					buffer[count++] = cpu_to_le32(ext->extent[i]);
3900 			} else {
3901 				return;
3902 			}
3903 		}
3904 	}
3905 
3906 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3907 	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
3908 	switch (adev->asic_type) {
3909 	case CHIP_BONAIRE:
3910 		buffer[count++] = cpu_to_le32(0x16000012);
3911 		buffer[count++] = cpu_to_le32(0x00000000);
3912 		break;
3913 	case CHIP_KAVERI:
3914 		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
3915 		buffer[count++] = cpu_to_le32(0x00000000);
3916 		break;
3917 	case CHIP_KABINI:
3918 	case CHIP_MULLINS:
3919 		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
3920 		buffer[count++] = cpu_to_le32(0x00000000);
3921 		break;
3922 	case CHIP_HAWAII:
3923 		buffer[count++] = cpu_to_le32(0x3a00161a);
3924 		buffer[count++] = cpu_to_le32(0x0000002e);
3925 		break;
3926 	default:
3927 		buffer[count++] = cpu_to_le32(0x00000000);
3928 		buffer[count++] = cpu_to_le32(0x00000000);
3929 		break;
3930 	}
3931 
3932 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3933 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
3934 
3935 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
3936 	buffer[count++] = cpu_to_le32(0);
3937 }
3938 
3939 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
3940 {
3941 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3942 			      AMD_PG_SUPPORT_GFX_SMG |
3943 			      AMD_PG_SUPPORT_GFX_DMG |
3944 			      AMD_PG_SUPPORT_CP |
3945 			      AMD_PG_SUPPORT_GDS |
3946 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3947 		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
3948 		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
3949 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3950 			gfx_v7_0_init_gfx_cgpg(adev);
3951 			gfx_v7_0_enable_cp_pg(adev, true);
3952 			gfx_v7_0_enable_gds_pg(adev, true);
3953 		}
3954 		gfx_v7_0_init_ao_cu_mask(adev);
3955 		gfx_v7_0_update_gfx_pg(adev, true);
3956 	}
3957 }
3958 
3959 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
3960 {
3961 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3962 			      AMD_PG_SUPPORT_GFX_SMG |
3963 			      AMD_PG_SUPPORT_GFX_DMG |
3964 			      AMD_PG_SUPPORT_CP |
3965 			      AMD_PG_SUPPORT_GDS |
3966 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3967 		gfx_v7_0_update_gfx_pg(adev, false);
3968 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3969 			gfx_v7_0_enable_cp_pg(adev, false);
3970 			gfx_v7_0_enable_gds_pg(adev, false);
3971 		}
3972 	}
3973 }
3974 
3975 /**
3976  * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
3977  *
3978  * @adev: amdgpu_device pointer
3979  *
3980  * Fetches a GPU clock counter snapshot (CIK).
3981  * Returns the 64 bit clock counter snapshot.
3982  */
3983 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3984 {
3985 	uint64_t clock;
3986 
3987 	mutex_lock(&adev->gfx.gpu_clock_mutex);
3988 	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3989 	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
3990 		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3991 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
3992 	return clock;
3993 }
3994 
3995 static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3996 					  uint32_t vmid,
3997 					  uint32_t gds_base, uint32_t gds_size,
3998 					  uint32_t gws_base, uint32_t gws_size,
3999 					  uint32_t oa_base, uint32_t oa_size)
4000 {
4001 	/* GDS Base */
4002 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4003 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4004 				WRITE_DATA_DST_SEL(0)));
4005 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4006 	amdgpu_ring_write(ring, 0);
4007 	amdgpu_ring_write(ring, gds_base);
4008 
4009 	/* GDS Size */
4010 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4011 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4012 				WRITE_DATA_DST_SEL(0)));
4013 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4014 	amdgpu_ring_write(ring, 0);
4015 	amdgpu_ring_write(ring, gds_size);
4016 
4017 	/* GWS */
4018 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4019 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4020 				WRITE_DATA_DST_SEL(0)));
4021 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4022 	amdgpu_ring_write(ring, 0);
4023 	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4024 
4025 	/* OA */
4026 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4027 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4028 				WRITE_DATA_DST_SEL(0)));
4029 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4030 	amdgpu_ring_write(ring, 0);
4031 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4032 }
4033 
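/*
 * Attempt soft recovery of a hung ring by issuing an SQ kill command
 * (broadcast, with VMID check) for the waves of the offending VMID.
 */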
4034 static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4035 {
4036 	struct amdgpu_device *adev = ring->adev;
4037 	uint32_t value = 0;
4038 
4039 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4040 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4041 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4042 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4043 	WREG32(mmSQ_CMD, value);
4044 }
4045 
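/*
 * Read an SQ per-wave register: program SQ_IND_INDEX with the SIMD,
 * wave, and register address, then read the value back from SQ_IND_DATA.
 */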
4046 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4047 {
4048 	WREG32(mmSQ_IND_INDEX,
4049 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4050 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4051 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
4052 		(SQ_IND_INDEX__FORCE_READ_MASK));
4053 	return RREG32(mmSQ_IND_DATA);
4054 }
4055 
4056 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4057 			   uint32_t wave, uint32_t thread,
4058 			   uint32_t regno, uint32_t num, uint32_t *out)
4059 {
4060 	WREG32(mmSQ_IND_INDEX,
4061 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4062 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4063 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
4064 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4065 		(SQ_IND_INDEX__FORCE_READ_MASK) |
4066 		(SQ_IND_INDEX__AUTO_INCR_MASK));
4067 	while (num--)
4068 		*(out++) = RREG32(mmSQ_IND_DATA);
4069 }
4070 
4071 static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4072 {
4073 	/* type 0 wave data */
4074 	dst[(*no_fields)++] = 0;
4075 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4076 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4077 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4078 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4079 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4080 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4081 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4082 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4083 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4084 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4085 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4086 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4087 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4088 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4089 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4090 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4091 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4092 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4093 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
4094 }
4095 
4096 static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
4097 				     uint32_t wave, uint32_t start,
4098 				     uint32_t size, uint32_t *dst)
4099 {
4100 	wave_read_regs(
4101 		adev, simd, wave, 0,
4102 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4103 }
4104 
4105 static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4106 				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
4107 {
4108 	cik_srbm_select(adev, me, pipe, q, vm);
4109 }
4110 
4111 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4112 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4113 	.select_se_sh = &gfx_v7_0_select_se_sh,
4114 	.read_wave_data = &gfx_v7_0_read_wave_data,
4115 	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4116 	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
4117 };
4118 
4119 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4120 	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
4121 	.set_safe_mode = gfx_v7_0_set_safe_mode,
4122 	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
4123 	.init = gfx_v7_0_rlc_init,
4124 	.get_csb_size = gfx_v7_0_get_csb_size,
4125 	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
4126 	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
4127 	.resume = gfx_v7_0_rlc_resume,
4128 	.stop = gfx_v7_0_rlc_stop,
4129 	.reset = gfx_v7_0_rlc_reset,
4130 	.start = gfx_v7_0_rlc_start,
4131 	.update_spm_vmid = gfx_v7_0_update_spm_vmid
4132 };
4133 
4134 static int gfx_v7_0_early_init(void *handle)
4135 {
4136 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4137 
4138 	adev->gfx.xcc_mask = 1;
4139 	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4140 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4141 					  AMDGPU_MAX_COMPUTE_RINGS);
4142 	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4143 	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4144 	gfx_v7_0_set_ring_funcs(adev);
4145 	gfx_v7_0_set_irq_funcs(adev);
4146 	gfx_v7_0_set_gds_init(adev);
4147 
4148 	return 0;
4149 }
4150 
4151 static int gfx_v7_0_late_init(void *handle)
4152 {
4153 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4154 	int r;
4155 
4156 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4157 	if (r)
4158 		return r;
4159 
4160 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4161 	if (r)
4162 		return r;
4163 
4164 	return 0;
4165 }
4166 
4167 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4168 {
4169 	u32 gb_addr_config;
4170 	u32 mc_arb_ramcfg;
4171 	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4172 	u32 tmp;
4173 
4174 	switch (adev->asic_type) {
4175 	case CHIP_BONAIRE:
4176 		adev->gfx.config.max_shader_engines = 2;
4177 		adev->gfx.config.max_tile_pipes = 4;
4178 		adev->gfx.config.max_cu_per_sh = 7;
4179 		adev->gfx.config.max_sh_per_se = 1;
4180 		adev->gfx.config.max_backends_per_se = 2;
4181 		adev->gfx.config.max_texture_channel_caches = 4;
4182 		adev->gfx.config.max_gprs = 256;
4183 		adev->gfx.config.max_gs_threads = 32;
4184 		adev->gfx.config.max_hw_contexts = 8;
4185 
4186 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4187 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4188 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4189 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4190 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4191 		break;
4192 	case CHIP_HAWAII:
4193 		adev->gfx.config.max_shader_engines = 4;
4194 		adev->gfx.config.max_tile_pipes = 16;
4195 		adev->gfx.config.max_cu_per_sh = 11;
4196 		adev->gfx.config.max_sh_per_se = 1;
4197 		adev->gfx.config.max_backends_per_se = 4;
4198 		adev->gfx.config.max_texture_channel_caches = 16;
4199 		adev->gfx.config.max_gprs = 256;
4200 		adev->gfx.config.max_gs_threads = 32;
4201 		adev->gfx.config.max_hw_contexts = 8;
4202 
4203 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4204 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4205 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4206 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4207 		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4208 		break;
4209 	case CHIP_KAVERI:
4210 		adev->gfx.config.max_shader_engines = 1;
4211 		adev->gfx.config.max_tile_pipes = 4;
4212 		adev->gfx.config.max_cu_per_sh = 8;
4213 		adev->gfx.config.max_backends_per_se = 2;
4214 		adev->gfx.config.max_sh_per_se = 1;
4215 		adev->gfx.config.max_texture_channel_caches = 4;
4216 		adev->gfx.config.max_gprs = 256;
4217 		adev->gfx.config.max_gs_threads = 16;
4218 		adev->gfx.config.max_hw_contexts = 8;
4219 
4220 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4221 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4222 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4223 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4224 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4225 		break;
4226 	case CHIP_KABINI:
4227 	case CHIP_MULLINS:
4228 	default:
4229 		adev->gfx.config.max_shader_engines = 1;
4230 		adev->gfx.config.max_tile_pipes = 2;
4231 		adev->gfx.config.max_cu_per_sh = 2;
4232 		adev->gfx.config.max_sh_per_se = 1;
4233 		adev->gfx.config.max_backends_per_se = 1;
4234 		adev->gfx.config.max_texture_channel_caches = 2;
4235 		adev->gfx.config.max_gprs = 256;
4236 		adev->gfx.config.max_gs_threads = 16;
4237 		adev->gfx.config.max_hw_contexts = 8;
4238 
4239 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4240 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4241 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4242 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4243 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4244 		break;
4245 	}
4246 
4247 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4248 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4249 
4250 	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
4251 				MC_ARB_RAMCFG, NOOFBANK);
4252 	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
4253 				MC_ARB_RAMCFG, NOOFRANKS);
4254 
4255 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4256 	adev->gfx.config.mem_max_burst_length_bytes = 256;
4257 	if (adev->flags & AMD_IS_APU) {
4258 		/* Get memory bank mapping mode. */
4259 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4260 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4261 		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4262 
4263 		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4264 		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4265 		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4266 
4267 		/* Validate settings in case only one DIMM is installed. */
4268 		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4269 			dimm00_addr_map = 0;
4270 		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4271 			dimm01_addr_map = 0;
4272 		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4273 			dimm10_addr_map = 0;
4274 		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4275 			dimm11_addr_map = 0;
4276 
4277 		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4278 		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
4279 		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4280 			adev->gfx.config.mem_row_size_in_kb = 2;
4281 		else
4282 			adev->gfx.config.mem_row_size_in_kb = 1;
4283 	} else {
4284 		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4285 		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4286 		if (adev->gfx.config.mem_row_size_in_kb > 4)
4287 			adev->gfx.config.mem_row_size_in_kb = 4;
4288 	}
4289 	/* XXX use MC settings? */
4290 	adev->gfx.config.shader_engine_tile_size = 32;
4291 	adev->gfx.config.num_gpus = 1;
4292 	adev->gfx.config.multi_gpu_tile_size = 64;
4293 
4294 	/* fix up row size */
4295 	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4296 	switch (adev->gfx.config.mem_row_size_in_kb) {
4297 	case 1:
4298 	default:
4299 		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4300 		break;
4301 	case 2:
4302 		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4303 		break;
4304 	case 4:
4305 		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4306 		break;
4307 	}
4308 	adev->gfx.config.gb_addr_config = gb_addr_config;
4309 }
4310 
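/*
 * Set up one compute ring: map it onto the given MEC/pipe/queue, assign
 * its doorbell, and attach it to the matching EOP interrupt source.
 */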
4311 static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
4312 					int mec, int pipe, int queue)
4313 {
4314 	int r;
4315 	unsigned irq_type;
4316 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
4317 
4318 	/* mec0 is me1 */
4319 	ring->me = mec + 1;
4320 	ring->pipe = pipe;
4321 	ring->queue = queue;
4322 
4323 	ring->ring_obj = NULL;
4324 	ring->use_doorbell = true;
4325 	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
4326 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4327 
4328 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4329 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
4330 		+ ring->pipe;
4331 
4332 	/* type-2 packets are deprecated on MEC, use type-3 instead */
4333 	r = amdgpu_ring_init(adev, ring, 1024,
4334 			     &adev->gfx.eop_irq, irq_type,
4335 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
4336 	if (r)
4337 		return r;
4338 
4340 	return 0;
4341 }
4342 
4343 static int gfx_v7_0_sw_init(void *handle)
4344 {
4345 	struct amdgpu_ring *ring;
4346 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4347 	int i, j, k, r, ring_id;
4348 
4349 	switch (adev->asic_type) {
4350 	case CHIP_KAVERI:
4351 		adev->gfx.mec.num_mec = 2;
4352 		break;
4353 	case CHIP_BONAIRE:
4354 	case CHIP_HAWAII:
4355 	case CHIP_KABINI:
4356 	case CHIP_MULLINS:
4357 	default:
4358 		adev->gfx.mec.num_mec = 1;
4359 		break;
4360 	}
4361 	adev->gfx.mec.num_pipe_per_mec = 4;
4362 	adev->gfx.mec.num_queue_per_pipe = 8;
4363 
4364 	/* EOP Event */
4365 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
4366 	if (r)
4367 		return r;
4368 
4369 	/* Privileged reg */
4370 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
4371 			      &adev->gfx.priv_reg_irq);
4372 	if (r)
4373 		return r;
4374 
4375 	/* Privileged inst */
4376 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
4377 			      &adev->gfx.priv_inst_irq);
4378 	if (r)
4379 		return r;
4380 
4381 	r = gfx_v7_0_init_microcode(adev);
4382 	if (r) {
4383 		DRM_ERROR("Failed to load gfx firmware!\n");
4384 		return r;
4385 	}
4386 
4387 	r = adev->gfx.rlc.funcs->init(adev);
4388 	if (r) {
4389 		DRM_ERROR("Failed to init rlc BOs!\n");
4390 		return r;
4391 	}
4392 
4393 	/* allocate mec buffers */
4394 	r = gfx_v7_0_mec_init(adev);
4395 	if (r) {
4396 		DRM_ERROR("Failed to init MEC BOs!\n");
4397 		return r;
4398 	}
4399 
4400 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4401 		ring = &adev->gfx.gfx_ring[i];
4402 		ring->ring_obj = NULL;
4403 		sprintf(ring->name, "gfx");
4404 		r = amdgpu_ring_init(adev, ring, 1024,
4405 				     &adev->gfx.eop_irq,
4406 				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
4407 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
4408 		if (r)
4409 			return r;
4410 	}
4411 
4412 	/* set up the compute queues - allocate horizontally across pipes */
4413 	ring_id = 0;
4414 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4415 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4416 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4417 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
4418 								     k, j))
4419 					continue;
4420 
4421 				r = gfx_v7_0_compute_ring_init(adev,
4422 								ring_id,
4423 								i, k, j);
4424 				if (r)
4425 					return r;
4426 
4427 				ring_id++;
4428 			}
4429 		}
4430 	}
4431 
4432 	adev->gfx.ce_ram_size = 0x8000;
4433 
4434 	gfx_v7_0_gpu_early_init(adev);
4435 
4436 	return r;
4437 }
4438 
4439 static int gfx_v7_0_sw_fini(void *handle)
4440 {
4441 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4442 	int i;
4443 
4444 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4445 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4446 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
4447 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4448 
4449 	gfx_v7_0_cp_compute_fini(adev);
4450 	amdgpu_gfx_rlc_fini(adev);
4451 	gfx_v7_0_mec_fini(adev);
4452 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
4453 				&adev->gfx.rlc.clear_state_gpu_addr,
4454 				(void **)&adev->gfx.rlc.cs_ptr);
4455 	if (adev->gfx.rlc.cp_table_size) {
4456 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
4457 				&adev->gfx.rlc.cp_table_gpu_addr,
4458 				(void **)&adev->gfx.rlc.cp_table_ptr);
4459 	}
4460 	gfx_v7_0_free_microcode(adev);
4461 
4462 	return 0;
4463 }
4464 
4465 static int gfx_v7_0_hw_init(void *handle)
4466 {
4467 	int r;
4468 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4469 
4470 	gfx_v7_0_constants_init(adev);
4471 
4472 	/* init CSB */
4473 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
4474 	/* init rlc */
4475 	r = adev->gfx.rlc.funcs->resume(adev);
4476 	if (r)
4477 		return r;
4478 
4479 	r = gfx_v7_0_cp_resume(adev);
4480 	if (r)
4481 		return r;
4482 
4483 	return r;
4484 }
4485 
4486 static int gfx_v7_0_hw_fini(void *handle)
4487 {
4488 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4489 
4490 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4491 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4492 	gfx_v7_0_cp_enable(adev, false);
4493 	adev->gfx.rlc.funcs->stop(adev);
4494 	gfx_v7_0_fini_pg(adev);
4495 
4496 	return 0;
4497 }
4498 
4499 static int gfx_v7_0_suspend(void *handle)
4500 {
4501 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4502 
4503 	return gfx_v7_0_hw_fini(adev);
4504 }
4505 
4506 static int gfx_v7_0_resume(void *handle)
4507 {
4508 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4509 
4510 	return gfx_v7_0_hw_init(adev);
4511 }
4512 
4513 static bool gfx_v7_0_is_idle(void *handle)
4514 {
4515 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4516 
4517 	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4518 		return false;
4519 	else
4520 		return true;
4521 }
4522 
4523 static int gfx_v7_0_wait_for_idle(void *handle)
4524 {
4525 	unsigned i;
4526 	u32 tmp;
4527 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4528 
4529 	for (i = 0; i < adev->usec_timeout; i++) {
4530 		/* read GRBM_STATUS */
4531 		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4532 
4533 		if (!tmp)
4534 			return 0;
4535 		udelay(1);
4536 	}
4537 	return -ETIMEDOUT;
4538 }
4539 
4540 static int gfx_v7_0_soft_reset(void *handle)
4541 {
4542 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4543 	u32 tmp;
4544 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4545 
4546 	/* GRBM_STATUS */
4547 	tmp = RREG32(mmGRBM_STATUS);
4548 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4549 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4550 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4551 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4552 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4553 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4554 		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4555 			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4556 
4557 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4558 		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4559 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4560 	}
4561 
4562 	/* GRBM_STATUS2 */
4563 	tmp = RREG32(mmGRBM_STATUS2);
4564 	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4565 		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4566 
4567 	/* SRBM_STATUS */
4568 	tmp = RREG32(mmSRBM_STATUS);
4569 	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4570 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4571 
4572 	if (grbm_soft_reset || srbm_soft_reset) {
4573 		/* disable CG/PG */
4574 		gfx_v7_0_fini_pg(adev);
4575 		gfx_v7_0_update_cg(adev, false);
4576 
4577 		/* stop the rlc */
4578 		adev->gfx.rlc.funcs->stop(adev);
4579 
4580 		/* Disable GFX parsing/prefetching */
4581 		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4582 
4583 		/* Disable MEC parsing/prefetching */
4584 		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4585 
4586 		if (grbm_soft_reset) {
4587 			tmp = RREG32(mmGRBM_SOFT_RESET);
4588 			tmp |= grbm_soft_reset;
4589 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4590 			WREG32(mmGRBM_SOFT_RESET, tmp);
4591 			tmp = RREG32(mmGRBM_SOFT_RESET);
4592 
4593 			udelay(50);
4594 
4595 			tmp &= ~grbm_soft_reset;
4596 			WREG32(mmGRBM_SOFT_RESET, tmp);
4597 			tmp = RREG32(mmGRBM_SOFT_RESET);
4598 		}
4599 
4600 		if (srbm_soft_reset) {
4601 			tmp = RREG32(mmSRBM_SOFT_RESET);
4602 			tmp |= srbm_soft_reset;
4603 			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4604 			WREG32(mmSRBM_SOFT_RESET, tmp);
4605 			tmp = RREG32(mmSRBM_SOFT_RESET);
4606 
4607 			udelay(50);
4608 
4609 			tmp &= ~srbm_soft_reset;
4610 			WREG32(mmSRBM_SOFT_RESET, tmp);
4611 			tmp = RREG32(mmSRBM_SOFT_RESET);
4612 		}
4613 		/* Wait a little for things to settle down */
4614 		udelay(50);
4615 	}
4616 	return 0;
4617 }
4618 
4619 static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4620 						 enum amdgpu_interrupt_state state)
4621 {
4622 	u32 cp_int_cntl;
4623 
4624 	switch (state) {
4625 	case AMDGPU_IRQ_STATE_DISABLE:
4626 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4627 		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4628 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4629 		break;
4630 	case AMDGPU_IRQ_STATE_ENABLE:
4631 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4632 		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4633 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4634 		break;
4635 	default:
4636 		break;
4637 	}
4638 }
4639 
4640 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4641 						     int me, int pipe,
4642 						     enum amdgpu_interrupt_state state)
4643 {
4644 	u32 mec_int_cntl, mec_int_cntl_reg;
4645 
4646 	/*
4647 	 * amdgpu controls only the first MEC. That's why this function only
4648 	 * handles the setting of interrupts for this specific MEC. All other
4649 	 * pipes' interrupts are set by amdkfd.
4650 	 */
4651 
4652 	if (me == 1) {
4653 		switch (pipe) {
4654 		case 0:
4655 			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4656 			break;
4657 		case 1:
4658 			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
4659 			break;
4660 		case 2:
4661 			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
4662 			break;
4663 		case 3:
4664 			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
4665 			break;
4666 		default:
4667 			DRM_DEBUG("invalid pipe %d\n", pipe);
4668 			return;
4669 		}
4670 	} else {
4671 		DRM_DEBUG("invalid me %d\n", me);
4672 		return;
4673 	}
4674 
4675 	switch (state) {
4676 	case AMDGPU_IRQ_STATE_DISABLE:
4677 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4678 		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4679 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4680 		break;
4681 	case AMDGPU_IRQ_STATE_ENABLE:
4682 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4683 		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4684 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4685 		break;
4686 	default:
4687 		break;
4688 	}
4689 }
4690 
4691 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4692 					     struct amdgpu_irq_src *src,
4693 					     unsigned type,
4694 					     enum amdgpu_interrupt_state state)
4695 {
4696 	u32 cp_int_cntl;
4697 
4698 	switch (state) {
4699 	case AMDGPU_IRQ_STATE_DISABLE:
4700 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4701 		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4702 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4703 		break;
4704 	case AMDGPU_IRQ_STATE_ENABLE:
4705 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4706 		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4707 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4708 		break;
4709 	default:
4710 		break;
4711 	}
4712 
4713 	return 0;
4714 }
4715 
4716 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4717 					      struct amdgpu_irq_src *src,
4718 					      unsigned type,
4719 					      enum amdgpu_interrupt_state state)
4720 {
4721 	u32 cp_int_cntl;
4722 
4723 	switch (state) {
4724 	case AMDGPU_IRQ_STATE_DISABLE:
4725 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4726 		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4727 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4728 		break;
4729 	case AMDGPU_IRQ_STATE_ENABLE:
4730 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4731 		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4732 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4733 		break;
4734 	default:
4735 		break;
4736 	}
4737 
4738 	return 0;
4739 }
4740 
4741 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4742 					    struct amdgpu_irq_src *src,
4743 					    unsigned type,
4744 					    enum amdgpu_interrupt_state state)
4745 {
4746 	switch (type) {
4747 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4748 		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4749 		break;
4750 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4751 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4752 		break;
4753 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4754 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4755 		break;
4756 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4757 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4758 		break;
4759 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4760 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4761 		break;
4762 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4763 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4764 		break;
4765 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4766 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4767 		break;
4768 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4769 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4770 		break;
4771 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4772 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4773 		break;
4774 	default:
4775 		break;
4776 	}
4777 	return 0;
4778 }
4779 
4780 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4781 			    struct amdgpu_irq_src *source,
4782 			    struct amdgpu_iv_entry *entry)
4783 {
4784 	u8 me_id, pipe_id;
4785 	struct amdgpu_ring *ring;
4786 	int i;
4787 
4788 	DRM_DEBUG("IH: CP EOP\n");
4789 	me_id = (entry->ring_id & 0x0c) >> 2;
4790 	pipe_id = (entry->ring_id & 0x03) >> 0;
4791 	switch (me_id) {
4792 	case 0:
4793 		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4794 		break;
4795 	case 1:
4796 	case 2:
4797 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4798 			ring = &adev->gfx.compute_ring[i];
4799 			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4800 				amdgpu_fence_process(ring);
4801 		}
4802 		break;
4803 	}
4804 	return 0;
4805 }
4806 
4807 static void gfx_v7_0_fault(struct amdgpu_device *adev,
4808 			   struct amdgpu_iv_entry *entry)
4809 {
4810 	struct amdgpu_ring *ring;
4811 	u8 me_id, pipe_id;
4812 	int i;
4813 
4814 	me_id = (entry->ring_id & 0x0c) >> 2;
4815 	pipe_id = (entry->ring_id & 0x03) >> 0;
4816 	switch (me_id) {
4817 	case 0:
4818 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4819 		break;
4820 	case 1:
4821 	case 2:
4822 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4823 			ring = &adev->gfx.compute_ring[i];
4824 			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4825 				drm_sched_fault(&ring->sched);
4826 		}
4827 		break;
4828 	}
4829 }
4830 
4831 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4832 				 struct amdgpu_irq_src *source,
4833 				 struct amdgpu_iv_entry *entry)
4834 {
4835 	DRM_ERROR("Illegal register access in command stream\n");
4836 	gfx_v7_0_fault(adev, entry);
4837 	return 0;
4838 }
4839 
4840 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4841 				  struct amdgpu_irq_src *source,
4842 				  struct amdgpu_iv_entry *entry)
4843 {
4844 	DRM_ERROR("Illegal instruction in command stream\n");
4845 	/* XXX soft reset the gfx block only */
4846 	gfx_v7_0_fault(adev, entry);
4847 	return 0;
4848 }
4849 
static int gfx_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (gate) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

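/**
 * gfx_v7_0_set_powergating_state - enable/disable GFX powergating
 *
 * @handle: amdgpu_device pointer (opaque IP block handle)
 * @state: powergating state (gate or ungate)
 *
 * Updates the GFX powergating setup if any GFX PG feature is enabled
 * in adev->pg_flags, and additionally toggles CP and GDS powergating
 * when GFX PG proper is supported.
 */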
static int gfx_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		gate = true;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, gate);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, gate);
			gfx_v7_0_enable_gds_pg(adev, gate);
		}
	}

	return 0;
}

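/*
 * Flush/invalidate the TC L1/L2 and the shader instruction and
 * constant (K) caches over the full address range via a SURFACE_SYNC
 * packet (5 dwords, matching the "SURFACE_SYNC" entry in
 * emit_frame_size below).
 */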
static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}

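/*
 * Compute-ring variant of gfx_v7_0_emit_mem_sync(): ACQUIRE_MEM
 * performs the same cache actions but carries the _HI size/base
 * words as well, making it 7 dwords in total.
 */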
static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
}

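/*
 * Common IP block callbacks shared by all GFX 7.x variants; per-ASIC
 * differences are handled inside the individual functions.
 */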
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.name = "gfx_v7_0",
	.early_init = gfx_v7_0_early_init,
	.late_init = gfx_v7_0_late_init,
	.sw_init = gfx_v7_0_sw_init,
	.sw_fini = gfx_v7_0_sw_fini,
	.hw_init = gfx_v7_0_hw_init,
	.hw_fini = gfx_v7_0_hw_fini,
	.suspend = gfx_v7_0_suspend,
	.resume = gfx_v7_0_resume,
	.is_idle = gfx_v7_0_is_idle,
	.wait_for_idle = gfx_v7_0_wait_for_idle,
	.soft_reset = gfx_v7_0_soft_reset,
	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
	.set_powergating_state = gfx_v7_0_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

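/*
 * Ring callbacks for the gfx ring.  emit_frame_size is the worst-case
 * dword count a single submission can add around its IBs, summed from
 * the per-helper costs noted in the comments, so enough ring space can
 * be reserved up front.
 */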
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
		3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush */
		5, /* SURFACE_SYNC */
	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
	.emit_wreg = gfx_v7_0_ring_emit_wreg,
	.soft_recovery = gfx_v7_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v7_0_emit_mem_sync,
};

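/* Ring callbacks for the compute (MEC) rings. */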
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
		7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
		7, /* gfx_v7_0_emit_mem_sync_compute */
	.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v7_0_ring_emit_wreg,
	.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};

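/* Attach the ring callbacks above to all gfx and compute rings. */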
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
}

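/*
 * Interrupt sources: .set programs the interrupt enable bits in the
 * CP registers, .process runs when the IH delivers the corresponding
 * IV entry.
 */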
static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
	.set = gfx_v7_0_set_eop_interrupt_state,
	.process = gfx_v7_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
	.set = gfx_v7_0_set_priv_reg_fault_state,
	.process = gfx_v7_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
	.set = gfx_v7_0_set_priv_inst_fault_state,
	.process = gfx_v7_0_priv_inst_irq,
};

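/* Wire up the EOP and privileged register/instruction fault sources. */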
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
}

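/*
 * Initialize the GDS (global data share) parameters: the GDS size and
 * compute max wave id are read back from the hardware, while the GWS
 * and OA counts are fixed for CIK.
 */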
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}

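/**
 * gfx_v7_0_get_cu_info - gather the compute unit topology
 *
 * @adev: amdgpu_device pointer
 *
 * Walks every shader engine/array, reads the active-CU bitmap for each
 * (after applying any CUs disabled via amdgpu_gfx_parse_disable_cu()),
 * counts the active CUs and builds the always-on (AO) CU mask, then
 * fills in the fixed per-CU limits (SIMDs, waves per SIMD, scratch
 * slots, wavefront and LDS size) that get reported to userspace.
 */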
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	memset(cu_info, 0, sizeof(*cu_info));

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 4 && j < 2)
				gfx_v7_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[0][i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}

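/*
 * Exported IP block descriptors.  All three GFX 7.x minor versions
 * share the same gfx_v7_0 function table; the minor number is what
 * distinguishes the CIK variants when the SOC adds its IP blocks.
 */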
const struct amdgpu_ip_block_version gfx_v7_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 2,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 3,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};