/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "clearstate_si.h"
#include "si.h"
#include "sid.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"

#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#include "si_enums.h"

#define TAHITI_GB_ADDR_CONFIG_GOLDEN	0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN	0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN	0x02010001

#define GFX6_NUM_GFX_RINGS	1
#define GFX6_NUM_COMPUTE_RINGS	2

static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");

MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");

MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
MODULE_FIRMWARE("amdgpu/verde_me.bin");
MODULE_FIRMWARE("amdgpu/verde_ce.bin");
MODULE_FIRMWARE("amdgpu/verde_rlc.bin");

MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
MODULE_FIRMWARE("amdgpu/oland_me.bin");
MODULE_FIRMWARE("amdgpu/oland_ce.bin");
MODULE_FIRMWARE("amdgpu/oland_rlc.bin");

MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
MODULE_FIRMWARE("amdgpu/hainan_me.bin");
MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");

static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);

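/*
 * Helpers for composing GB_TILE_MODEn register values: each macro shifts a
 * tiling field into its bit position.  MICRO_TILE_MODE, BANK_WIDTH,
 * BANK_HEIGHT, MACRO_TILE_ASPECT and NUM_BANKS use hard-coded shifts that
 * are assumed to match the SI (gfx6) GB_TILE_MODE layout.
 */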
#define ARRAY_MODE(x)		((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)		((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)		((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE(x)	((x) << 0)
#define SAMPLE_SPLIT(x)		((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)		((x) << 14)
#define BANK_HEIGHT(x)		((x) << 16)
#define MACRO_TILE_ASPECT(x)	((x) << 18)
#define NUM_BANKS(x)		((x) << 20)

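/*
 * RLC save/restore register list for Verde.  Each entry appears to pack a
 * GRBM_GFX_INDEX selector in the upper 16 bits and a register dword offset
 * in the lower 16 bits, followed by a default value; a zero entry
 * terminates the list.
 */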
static const u32 verde_rlc_save_restore_register_list[] =
{
	(0x8000 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x8000 << 16) | (0xe80 >> 2),
	0x00000000,
	(0x8040 << 16) | (0xe80 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x8040 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x8000 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x98f0 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xe7c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9148 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9148 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9150 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x897c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8d8c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac54 >> 2),
	0x00000000,
	0x3,
	(0x9c00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9910 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9914 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9918 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x991c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9920 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9924 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9928 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x992c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9930 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9934 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9938 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x993c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9940 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9944 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9948 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x994c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9950 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9954 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9958 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x995c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9960 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9964 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9968 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x996c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9970 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9974 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9978 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x997c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9980 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9984 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9988 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x998c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c08 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x8000 << 16) | (0xe84 >> 2),
	0x00000000,
	(0x8040 << 16) | (0xe84 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x914c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x914c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9354 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9354 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9060 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9364 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x913c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e0 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e4 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e0 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e4 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e50 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c0c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e58 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e5c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9508 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x950c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9494 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88cc >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x89b0 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8b10 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9830 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9838 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9a10 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8001 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8001 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8041 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8041 << 16) | (0x9874 >> 2),
	0x00000000,
	0x00000000
};

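/*
 * gfx_v6_0_init_microcode - load the PFP, ME, CE and RLC firmware images
 *
 * Requests the per-chip gfx firmware files (e.g. amdgpu/verde_pfp.bin),
 * caches the parsed ucode and feature versions in adev->gfx, and releases
 * every firmware handle again if any request fails.
 */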
static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v1_0 *rlc_hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default: BUG();
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_pfp.bin", chip_name);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_me.bin", chip_name);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_ce.bin", chip_name);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

out:
	if (err) {
		pr_err("gfx6: failed to load %s gfx firmware\n", chip_name);
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.ce_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
	}
	return err;
}

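/*
 * gfx_v6_0_tiling_mode_table_init - program the GB_TILE_MODE registers
 *
 * Fills adev->gfx.config.tile_mode_array with the per-ASIC tiling
 * configuration and writes each entry to mmGB_TILE_MODE0 + n.  The tile
 * split used for "split equal to row size" entries depends on the memory
 * row size detected for the board.
 */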
static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	u32 reg_offset, split_equal_to_row_size, *tilemode;

	memset(adev->gfx.config.tile_mode_array, 0, sizeof(adev->gfx.config.tile_mode_array));
	tilemode = adev->gfx.config.tile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	if (adev->asic_type == CHIP_VERDE) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[26] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[27] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[28] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[29] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[30] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else if (adev->asic_type == CHIP_OLAND) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(split_equal_to_row_size) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(split_equal_to_row_size) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(split_equal_to_row_size) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(split_equal_to_row_size) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			NUM_BANKS(ADDR_SURF_8_BANK) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else if (adev->asic_type == CHIP_HAINAN) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[26] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[27] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[28] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[29] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[30] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK) |
			TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[26] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[27] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[28] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[29] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[30] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_8x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_2_BANK);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else {
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
	}
}

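/*
 * gfx_v6_0_select_se_sh - program GRBM_GFX_INDEX
 *
 * Selects the shader engine, shader array and instance that subsequent
 * register accesses apply to; 0xffffffff selects broadcast for that field,
 * e.g. gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0)
 * restores broadcast to everything.  The xcc_id argument appears unused on
 * SI and is only there to match the common gfx callback signature.
 */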
static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				  u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}

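/*
 * Returns a bitmask of the render backends that are actually enabled for
 * the currently selected SE/SH, i.e. the backends that are neither fused
 * off (CC_RB_BACKEND_DISABLE) nor disabled by the user
 * (GC_USER_RB_BACKEND_DISABLE).
 */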
static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
		RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return ~data & mask;
}

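/* Default PA_SC_RASTER_CONFIG value for each supported SI variant. */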
static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		*rconf |=
			(2 << PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT) |
			(1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
			(2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
			(1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT) |
			(2 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT) |
			(2 << PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT) |
			(2 << PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT);
		break;
	case CHIP_VERDE:
		*rconf |=
			(1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
			(2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
			(1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT);
		break;
	case CHIP_OLAND:
		*rconf |= (1 << PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT);
		break;
	case CHIP_HAINAN:
		*rconf |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}

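/*
 * When some render backends are harvested, the default raster config no
 * longer maps every SE/packer/RB slot onto existing hardware.  This helper
 * recomputes a per-SE PA_SC_RASTER_CONFIG so that the SE_MAP, PKR_MAP and
 * RB_MAP fields only route to backends present in rb_mask.
 */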
static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
						     u32 raster_config, unsigned rb_mask,
						     unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~PA_SC_RASTER_CONFIG__SE_MAP_MASK;

			if (!se_mask[idx])
				raster_config_se |= RASTER_CONFIG_SE_MAP_3 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
			else
				raster_config_se |= RASTER_CONFIG_SE_MAP_0 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PA_SC_RASTER_CONFIG__PKR_MAP_MASK;

			if (!pkr0_mask)
				raster_config_se |= RASTER_CONFIG_PKR_MAP_3 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
			else
				raster_config_se |= RASTER_CONFIG_PKR_MAP_0 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK;

				if (!rb0_mask)
					raster_config_se |=
						RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
				else
					raster_config_se |=
						RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK;

					if (!rb0_mask)
						raster_config_se |=
							RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
					else
						raster_config_se |=
							RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on SI */
		gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
	}

	/* GRBM_GFX_INDEX has a different offset on SI */
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}

1466 static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
1467 {
1468 int i, j;
1469 u32 data;
1470 u32 raster_config = 0;
1471 u32 active_rbs = 0;
1472 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1473 adev->gfx.config.max_sh_per_se;
1474 unsigned num_rb_pipes;
1475
1476 mutex_lock(&adev->grbm_idx_mutex);
1477 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1478 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1479 gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1480 data = gfx_v6_0_get_rb_active_bitmap(adev);
1481 active_rbs |= data <<
1482 ((i * adev->gfx.config.max_sh_per_se + j) *
1483 rb_bitmap_width_per_sh);
1484 }
1485 }
1486 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1487
1488 adev->gfx.config.backend_enable_mask = active_rbs;
1489 adev->gfx.config.num_rbs = hweight32(active_rbs);
1490
1491 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1492 adev->gfx.config.max_shader_engines, 16);
1493
1494 gfx_v6_0_raster_config(adev, &raster_config);
1495
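	/*
	 * With nothing harvested (or no RBs reported at all) the default
	 * raster config is written as-is; otherwise a per-SE variant is
	 * derived below that routes around the disabled backends.
	 */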
1496 if (!adev->gfx.config.backend_enable_mask ||
1497 adev->gfx.config.num_rbs >= num_rb_pipes)
1498 WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1499 else
1500 gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
1501 adev->gfx.config.backend_enable_mask,
1502 num_rb_pipes);
1503
1504 /* cache the values for userspace */
1505 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1506 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1507 gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1508 adev->gfx.config.rb_config[i][j].rb_backend_disable =
1509 RREG32(mmCC_RB_BACKEND_DISABLE);
1510 adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
1511 RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1512 adev->gfx.config.rb_config[i][j].raster_config =
1513 RREG32(mmPA_SC_RASTER_CONFIG);
1514 }
1515 }
1516 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1517 mutex_unlock(&adev->grbm_idx_mutex);
1518 }
1519
1520 static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
1521 u32 bitmap)
1522 {
1523 u32 data;
1524
1525 if (!bitmap)
1526 return;
1527
1528 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
1529 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
1530
1531 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
1532 }
1533
1534 static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
1535 {
1536 u32 data, mask;
1537
1538 data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
1539 RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
1540
1541 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
1542 return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
1543 }
1544
1545
1546 static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
1547 {
1548 int i, j, k;
1549 u32 data, mask;
1550 u32 active_cu = 0;
1551
1552 mutex_lock(&adev->grbm_idx_mutex);
1553 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1554 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1555 gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
1556 data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
1557 active_cu = gfx_v6_0_get_cu_enabled(adev);
1558
1559 mask = 1;
1560 for (k = 0; k < 16; k++) {
1561 mask <<= k;
1562 if (active_cu & mask) {
1563 data &= ~mask;
1564 WREG32(mmSPI_STATIC_THREAD_MGMT_3, data);
1565 break;
1566 }
1567 }
1568 }
1569 }
1570 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1571 mutex_unlock(&adev->grbm_idx_mutex);
1572 }
1573
1574 /**
1575 * gfx_v6_0_setup_tcc() - setup which TCCs are used
1576 *
1577 * @adev: amdgpu_device pointer
1578 *
1579 * Verify whether the current GPU has any TCCs disabled,
1580 * which can happen when the GPU is harvested and some
1581 * memory channels are disabled, reducing the memory bus width.
1582 * For example, on the Radeon HD 7870 XT (Tahiti LE).
1583 *
1584 * If some TCCs are disabled, we need to make sure that
1585 * the disabled TCCs are not used, and the remaining TCCs
1586 * are used optimally.
1587 *
1588 * TCP_CHAN_STEER_LO/HI control which TCC is used by TCP channels.
1589 * TCP_ADDR_CONFIG.NUM_TCC_BANKS controls how many channels are used.
1590 *
1591 * For optimal performance:
1592 * - Rely on the CHAN_STEER from the golden registers table,
1593 * only skip disabled TCCs but keep the mapping order.
1594 * - Limit NUM_TCC_BANKS to number of active TCCs to avoid thrashing,
1595 * which performs better than using the same TCC twice.
1596 */
1597 static void gfx_v6_0_setup_tcc(struct amdgpu_device *adev)
1598 {
1599 u32 i, tcc, tcp_addr_config, num_active_tcc = 0;
1600 u64 chan_steer, patched_chan_steer = 0;
1601 const u32 num_max_tcc = adev->gfx.config.max_texture_channel_caches;
1602 const u32 dis_tcc_mask =
1603 amdgpu_gfx_create_bitmask(num_max_tcc) &
1604 (REG_GET_FIELD(RREG32(mmCGTS_TCC_DISABLE),
1605 CGTS_TCC_DISABLE, TCC_DISABLE) |
1606 REG_GET_FIELD(RREG32(mmCGTS_USER_TCC_DISABLE),
1607 CGTS_USER_TCC_DISABLE, TCC_DISABLE));
1608
1609 /* When no TCC is disabled, the golden registers table already has optimal TCC setup */
1610 if (!dis_tcc_mask)
1611 return;
1612
1613 /* Each 4-bit nibble contains the index of a TCC used by all TCPs */
1614 chan_steer = RREG32(mmTCP_CHAN_STEER_LO) | ((u64)RREG32(mmTCP_CHAN_STEER_HI) << 32ull);
1615
1616 /* Patch the TCP to TCC mapping to skip disabled TCCs */
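	/*
	 * Illustrative example (hypothetical values): with chan_steer
	 * 0x76543210 and only TCC 2 harvested, the loop below yields a
	 * patched value of 0x7654310 and num_active_tcc = 7, i.e. the
	 * surviving TCC indices packed into the low nibbles in their
	 * original order.
	 */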
1617 for (i = 0; i < num_max_tcc; ++i) {
1618 tcc = (chan_steer >> (u64)(4 * i)) & 0xf;
1619
1620 if (!((1 << tcc) & dis_tcc_mask)) {
1621 /* Copy enabled TCC indices to the patched register value. */
1622 patched_chan_steer |= (u64)tcc << (u64)(4 * num_active_tcc);
1623 ++num_active_tcc;
1624 }
1625 }
1626
1627 WARN_ON(num_active_tcc != num_max_tcc - hweight32(dis_tcc_mask));
1628
1629 /* Patch number of TCCs used by TCPs */
1630 tcp_addr_config = REG_SET_FIELD(RREG32(mmTCP_ADDR_CONFIG),
1631 TCP_ADDR_CONFIG, NUM_TCC_BANKS,
1632 num_active_tcc - 1);
1633
1634 WREG32(mmTCP_ADDR_CONFIG, tcp_addr_config);
1635 WREG32(mmTCP_CHAN_STEER_HI, upper_32_bits(patched_chan_steer));
1636 WREG32(mmTCP_CHAN_STEER_LO, lower_32_bits(patched_chan_steer));
1637 }
1638
1639 static void gfx_v6_0_config_init(struct amdgpu_device *adev)
1640 {
1641 adev->gfx.config.double_offchip_lds_buf = 0;
1642 }
1643
1644 static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
1645 {
1646 u32 gb_addr_config = 0;
1647 u32 mc_arb_ramcfg;
1648 u32 sx_debug_1;
1649 u32 hdp_host_path_cntl;
1650 u32 tmp;
1651
1652 switch (adev->asic_type) {
1653 case CHIP_TAHITI:
1654 adev->gfx.config.max_shader_engines = 2;
1655 adev->gfx.config.max_tile_pipes = 12;
1656 adev->gfx.config.max_cu_per_sh = 8;
1657 adev->gfx.config.max_sh_per_se = 2;
1658 adev->gfx.config.max_backends_per_se = 4;
1659 adev->gfx.config.max_texture_channel_caches = 12;
1660 adev->gfx.config.max_gprs = 256;
1661 adev->gfx.config.max_gs_threads = 32;
1662 adev->gfx.config.max_hw_contexts = 8;
1663
1664 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1665 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1666 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1667 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1668 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1669 break;
1670 case CHIP_PITCAIRN:
1671 adev->gfx.config.max_shader_engines = 2;
1672 adev->gfx.config.max_tile_pipes = 8;
1673 adev->gfx.config.max_cu_per_sh = 5;
1674 adev->gfx.config.max_sh_per_se = 2;
1675 adev->gfx.config.max_backends_per_se = 4;
1676 adev->gfx.config.max_texture_channel_caches = 8;
1677 adev->gfx.config.max_gprs = 256;
1678 adev->gfx.config.max_gs_threads = 32;
1679 adev->gfx.config.max_hw_contexts = 8;
1680
1681 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1682 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1683 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1684 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1685 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1686 break;
1687 case CHIP_VERDE:
1688 adev->gfx.config.max_shader_engines = 1;
1689 adev->gfx.config.max_tile_pipes = 4;
1690 adev->gfx.config.max_cu_per_sh = 5;
1691 adev->gfx.config.max_sh_per_se = 2;
1692 adev->gfx.config.max_backends_per_se = 4;
1693 adev->gfx.config.max_texture_channel_caches = 4;
1694 adev->gfx.config.max_gprs = 256;
1695 adev->gfx.config.max_gs_threads = 32;
1696 adev->gfx.config.max_hw_contexts = 8;
1697
1698 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1699 adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
1700 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1701 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1702 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1703 break;
1704 case CHIP_OLAND:
1705 adev->gfx.config.max_shader_engines = 1;
1706 adev->gfx.config.max_tile_pipes = 4;
1707 adev->gfx.config.max_cu_per_sh = 6;
1708 adev->gfx.config.max_sh_per_se = 1;
1709 adev->gfx.config.max_backends_per_se = 2;
1710 adev->gfx.config.max_texture_channel_caches = 4;
1711 adev->gfx.config.max_gprs = 256;
1712 adev->gfx.config.max_gs_threads = 16;
1713 adev->gfx.config.max_hw_contexts = 8;
1714
1715 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1716 adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
1717 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1718 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1719 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1720 break;
1721 case CHIP_HAINAN:
1722 adev->gfx.config.max_shader_engines = 1;
1723 adev->gfx.config.max_tile_pipes = 4;
1724 adev->gfx.config.max_cu_per_sh = 5;
1725 adev->gfx.config.max_sh_per_se = 1;
1726 adev->gfx.config.max_backends_per_se = 1;
1727 adev->gfx.config.max_texture_channel_caches = 2;
1728 adev->gfx.config.max_gprs = 256;
1729 adev->gfx.config.max_gs_threads = 16;
1730 adev->gfx.config.max_hw_contexts = 8;
1731
1732 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1733 adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
1734 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1735 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1736 gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
1737 break;
1738 default:
1739 BUG();
1740 break;
1741 }
1742
1743 WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1744 WREG32(mmSRBM_INT_CNTL, 1);
1745 WREG32(mmSRBM_INT_ACK, 1);
1746
1747 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
1748
1749 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1750 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1751
1752 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1753 adev->gfx.config.mem_max_burst_length_bytes = 256;
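	/* Derive the DRAM row size from the column count: 4 bytes * 2^(8 + NOOFCOLS), in KB, capped at 4 below */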
1754 tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
1755 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1756 if (adev->gfx.config.mem_row_size_in_kb > 4)
1757 adev->gfx.config.mem_row_size_in_kb = 4;
1758 adev->gfx.config.shader_engine_tile_size = 32;
1759 adev->gfx.config.num_gpus = 1;
1760 adev->gfx.config.multi_gpu_tile_size = 64;
1761
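	/* GB_ADDR_CONFIG.ROW_SIZE encoding: 0 = 1KB, 1 = 2KB, 2 = 4KB rows */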
1762 gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
1763 switch (adev->gfx.config.mem_row_size_in_kb) {
1764 case 1:
1765 default:
1766 gb_addr_config |= 0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
1767 break;
1768 case 2:
1769 gb_addr_config |= 1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
1770 break;
1771 case 4:
1772 gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
1773 break;
1774 }
1775 gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
1776 if (adev->gfx.config.max_shader_engines == 2)
1777 gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
1778 adev->gfx.config.gb_addr_config = gb_addr_config;
1779
1780 WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
1781 WREG32(mmDMIF_ADDR_CONFIG, gb_addr_config);
1782 WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
1783 WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
1784 WREG32(mmDMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1785 WREG32(mmDMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
1786
1787 #if 0
1788 if (adev->has_uvd) {
1789 WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
1790 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
1791 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
1792 }
1793 #endif
1794 gfx_v6_0_tiling_mode_table_init(adev);
1795
1796 gfx_v6_0_setup_rb(adev);
1797 gfx_v6_0_setup_tcc(adev);
1798
1799 gfx_v6_0_setup_spi(adev);
1800
1801 gfx_v6_0_get_cu_info(adev);
1802 gfx_v6_0_config_init(adev);
1803
1804 WREG32(mmCP_QUEUE_THRESHOLDS,
1805 ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
1806 (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
1807
1808 /* set HW defaults for 3D engine */
1809 WREG32(mmCP_MEQ_THRESHOLDS,
1810 (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1811 (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1812
1813 sx_debug_1 = RREG32(mmSX_DEBUG_1);
1814 WREG32(mmSX_DEBUG_1, sx_debug_1);
1815
1816 WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1817
1818 WREG32(mmPA_SC_FIFO_SIZE, ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1819 (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1820 (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1821 (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1822
1823 WREG32(mmVGT_NUM_INSTANCES, 1);
1824 WREG32(mmCP_PERFMON_CNTL, 0);
1825 WREG32(mmSQ_CONFIG, 0);
1826 WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
1827 (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
1828
1829 WREG32(mmVGT_CACHE_INVALIDATION,
1830 (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
1831 (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
1832
1833 WREG32(mmVGT_GS_VERTEX_REUSE, 16);
1834 WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
1835
1836 WREG32(mmCB_PERFCOUNTER0_SELECT0, 0);
1837 WREG32(mmCB_PERFCOUNTER0_SELECT1, 0);
1838 WREG32(mmCB_PERFCOUNTER1_SELECT0, 0);
1839 WREG32(mmCB_PERFCOUNTER1_SELECT1, 0);
1840 WREG32(mmCB_PERFCOUNTER2_SELECT0, 0);
1841 WREG32(mmCB_PERFCOUNTER2_SELECT1, 0);
1842 WREG32(mmCB_PERFCOUNTER3_SELECT0, 0);
1843 WREG32(mmCB_PERFCOUNTER3_SELECT1, 0);
1844
1845 hdp_host_path_cntl = RREG32(mmHDP_HOST_PATH_CNTL);
1846 WREG32(mmHDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1847
1848 WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
1849 (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
1850
1851 udelay(50);
1852 }
1853
1854 static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
1855 {
1856 struct amdgpu_device *adev = ring->adev;
1857 uint32_t tmp = 0;
1858 unsigned i;
1859 int r;
1860
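	/*
	 * Seed SCRATCH_REG0 with a known value, submit a packet that
	 * overwrites it and poll until the new value appears; a timeout
	 * means the ring is not consuming packets.
	 */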
1861 WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
1862
1863 r = amdgpu_ring_alloc(ring, 3);
1864 if (r)
1865 return r;
1866
1867 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1868 amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_CONFIG_REG_START);
1869 amdgpu_ring_write(ring, 0xDEADBEEF);
1870 amdgpu_ring_commit(ring);
1871
1872 for (i = 0; i < adev->usec_timeout; i++) {
1873 tmp = RREG32(mmSCRATCH_REG0);
1874 if (tmp == 0xDEADBEEF)
1875 break;
1876 udelay(1);
1877 }
1878
1879 if (i >= adev->usec_timeout)
1880 r = -ETIMEDOUT;
1881 return r;
1882 }
1883
1884 static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
1885 {
1886 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
1887 amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
1888 EVENT_INDEX(0));
1889 }
1890
1891 static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1892 u64 seq, unsigned flags)
1893 {
1894 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
1895 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
1896 /* flush read cache over gart */
1897 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1898 amdgpu_ring_write(ring, (mmCP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
1899 amdgpu_ring_write(ring, 0);
1900 amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1901 amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
1902 PACKET3_TC_ACTION_ENA |
1903 PACKET3_SH_KCACHE_ACTION_ENA |
1904 PACKET3_SH_ICACHE_ACTION_ENA);
1905 amdgpu_ring_write(ring, 0xFFFFFFFF);
1906 amdgpu_ring_write(ring, 0);
1907 amdgpu_ring_write(ring, 10); /* poll interval */
1908 /* EVENT_WRITE_EOP - flush caches, send int */
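	/*
	 * DATA_SEL picks a 32- or 64-bit fence value write; INT_SEL asks the
	 * CP to raise an interrupt once that write has been confirmed.
	 */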
1909 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1910 amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
1911 amdgpu_ring_write(ring, addr & 0xfffffffc);
1912 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
1913 ((write64bit ? 2 : 1) << CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT) |
1914 ((int_sel ? 2 : 0) << CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT));
1915 amdgpu_ring_write(ring, lower_32_bits(seq));
1916 amdgpu_ring_write(ring, upper_32_bits(seq));
1917 }
1918
1919 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
1920 struct amdgpu_job *job,
1921 struct amdgpu_ib *ib,
1922 uint32_t flags)
1923 {
1924 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1925 u32 header, control = 0;
1926
1927 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
1928 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
1929 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
1930 amdgpu_ring_write(ring, 0);
1931 }
1932
1933 if (ib->flags & AMDGPU_IB_FLAG_CE)
1934 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
1935 else
1936 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
1937
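	/* The low bits carry the IB length in dwords, bits 24+ the VMID it runs under */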
1938 control |= ib->length_dw | (vmid << 24);
1939
1940 amdgpu_ring_write(ring, header);
1941 amdgpu_ring_write(ring,
1942 #ifdef __BIG_ENDIAN
1943 (2 << 0) |
1944 #endif
1945 (ib->gpu_addr & 0xFFFFFFFC));
1946 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
1947 amdgpu_ring_write(ring, control);
1948 }
1949
1950 /**
1951 * gfx_v6_0_ring_test_ib - basic ring IB test
1952 *
1953 * @ring: amdgpu_ring structure holding ring information
1954 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1955 *
1956 * Allocate an IB and execute it on the gfx ring (SI).
1957 * Provides a basic gfx ring test to verify that IBs are working.
1958 * Returns 0 on success, error on failure.
1959 */
1960 static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1961 {
1962 struct amdgpu_device *adev = ring->adev;
1963 struct dma_fence *f = NULL;
1964 struct amdgpu_ib ib;
1965 uint32_t tmp = 0;
1966 long r;
1967
1968 WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
1969 memset(&ib, 0, sizeof(ib));
1970 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
1971 if (r)
1972 return r;
1973
1974 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
1975 ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_CONFIG_REG_START;
1976 ib.ptr[2] = 0xDEADBEEF;
1977 ib.length_dw = 3;
1978
1979 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1980 if (r)
1981 goto error;
1982
1983 r = dma_fence_wait_timeout(f, false, timeout);
1984 if (r == 0) {
1985 r = -ETIMEDOUT;
1986 goto error;
1987 } else if (r < 0) {
1988 goto error;
1989 }
1990 tmp = RREG32(mmSCRATCH_REG0);
1991 if (tmp == 0xDEADBEEF)
1992 r = 0;
1993 else
1994 r = -EINVAL;
1995
1996 error:
1997 amdgpu_ib_free(&ib, NULL);
1998 dma_fence_put(f);
1999 return r;
2000 }
2001
2002 static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2003 {
2004 if (enable) {
2005 WREG32(mmCP_ME_CNTL, 0);
2006 } else {
2007 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
2008 CP_ME_CNTL__PFP_HALT_MASK |
2009 CP_ME_CNTL__CE_HALT_MASK));
2010 WREG32(mmSCRATCH_UMSK, 0);
2011 }
2012 udelay(50);
2013 }
2014
2015 static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2016 {
2017 unsigned i;
2018 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2019 const struct gfx_firmware_header_v1_0 *ce_hdr;
2020 const struct gfx_firmware_header_v1_0 *me_hdr;
2021 const __le32 *fw_data;
2022 u32 fw_size;
2023
2024 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2025 return -EINVAL;
2026
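	/* Halt the CP before streaming the PFP, CE and ME images through their ucode ports */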
2027 gfx_v6_0_cp_gfx_enable(adev, false);
2028 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2029 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2030 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2031
2032 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2033 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2034 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2035
2036 /* PFP */
2037 fw_data = (const __le32 *)
2038 (adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2039 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2040 WREG32(mmCP_PFP_UCODE_ADDR, 0);
2041 for (i = 0; i < fw_size; i++)
2042 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2043 WREG32(mmCP_PFP_UCODE_ADDR, 0);
2044
2045 /* CE */
2046 fw_data = (const __le32 *)
2047 (adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2048 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2049 WREG32(mmCP_CE_UCODE_ADDR, 0);
2050 for (i = 0; i < fw_size; i++)
2051 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2052 WREG32(mmCP_CE_UCODE_ADDR, 0);
2053
2054 /* ME */
2055 	fw_data = (const __le32 *)
2056 (adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2057 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2058 WREG32(mmCP_ME_RAM_WADDR, 0);
2059 for (i = 0; i < fw_size; i++)
2060 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2061 WREG32(mmCP_ME_RAM_WADDR, 0);
2062
2063 WREG32(mmCP_PFP_UCODE_ADDR, 0);
2064 WREG32(mmCP_CE_UCODE_ADDR, 0);
2065 WREG32(mmCP_ME_RAM_WADDR, 0);
2066 WREG32(mmCP_ME_RAM_RADDR, 0);
2067 return 0;
2068 }
2069
2070 static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
2071 {
2072 const struct cs_section_def *sect = NULL;
2073 const struct cs_extent_def *ext = NULL;
2074 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2075 int r, i;
2076
2077 r = amdgpu_ring_alloc(ring, 7 + 4);
2078 if (r) {
2079 drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
2080 return r;
2081 }
2082 amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2083 amdgpu_ring_write(ring, 0x1);
2084 amdgpu_ring_write(ring, 0x0);
2085 amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
2086 amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2087 amdgpu_ring_write(ring, 0);
2088 amdgpu_ring_write(ring, 0);
2089
2090 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2091 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2092 amdgpu_ring_write(ring, 0xc000);
2093 amdgpu_ring_write(ring, 0xe000);
2094 amdgpu_ring_commit(ring);
2095
2096 gfx_v6_0_cp_gfx_enable(adev, true);
2097
2098 r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
2099 if (r) {
2100 drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
2101 return r;
2102 }
2103
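	/* Replay the clear state buffer contents inline, bracketed by PREAMBLE begin/end markers */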
2104 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2105 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2106
2107 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2108 for (ext = sect->section; ext->extent != NULL; ++ext) {
2109 if (sect->id == SECT_CONTEXT) {
2110 amdgpu_ring_write(ring,
2111 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2112 amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2113 for (i = 0; i < ext->reg_count; i++)
2114 amdgpu_ring_write(ring, ext->extent[i]);
2115 }
2116 }
2117 }
2118
2119 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2120 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2121
2122 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2123 amdgpu_ring_write(ring, 0);
2124
2125 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2126 amdgpu_ring_write(ring, 0x00000316);
2127 amdgpu_ring_write(ring, 0x0000000e);
2128 amdgpu_ring_write(ring, 0x00000010);
2129
2130 amdgpu_ring_commit(ring);
2131
2132 return 0;
2133 }
2134
2135 static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
2136 {
2137 struct amdgpu_ring *ring;
2138 u32 tmp;
2139 u32 rb_bufsz;
2140 int r;
2141 u64 rptr_addr;
2142
2143 WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2144 WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2145
2146 /* Set the write pointer delay */
2147 WREG32(mmCP_RB_WPTR_DELAY, 0);
2148
2149 WREG32(mmCP_DEBUG, 0);
2150 WREG32(mmSCRATCH_ADDR, 0);
2151
2152 /* ring 0 - compute and gfx */
2153 /* Set ring buffer size */
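	/* CP_RB0_CNTL takes the ring and fetch block sizes as log2-encoded fields */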
2154 ring = &adev->gfx.gfx_ring[0];
2155 rb_bufsz = order_base_2(ring->ring_size / 8);
2156 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2157
2158 #ifdef __BIG_ENDIAN
2159 tmp |= BUF_SWAP_32BIT;
2160 #endif
2161 WREG32(mmCP_RB0_CNTL, tmp);
2162
2163 /* Initialize the ring buffer's read and write pointers */
2164 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2165 ring->wptr = 0;
2166 WREG32(mmCP_RB0_WPTR, ring->wptr);
2167
2168 /* set the wb address whether it's enabled or not */
2169 rptr_addr = ring->rptr_gpu_addr;
2170 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2171 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2172
2173 WREG32(mmSCRATCH_UMSK, 0);
2174
2175 mdelay(1);
2176 WREG32(mmCP_RB0_CNTL, tmp);
2177
2178 WREG32(mmCP_RB0_BASE, ring->gpu_addr >> 8);
2179
2180 /* start the rings */
2181 gfx_v6_0_cp_gfx_start(adev);
2182 r = amdgpu_ring_test_helper(ring);
2183 if (r)
2184 return r;
2185
2186 return 0;
2187 }
2188
2189 static u64 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
2190 {
2191 return *ring->rptr_cpu_addr;
2192 }
2193
2194 static u64 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
2195 {
2196 struct amdgpu_device *adev = ring->adev;
2197
2198 if (ring == &adev->gfx.gfx_ring[0])
2199 return RREG32(mmCP_RB0_WPTR);
2200 else if (ring == &adev->gfx.compute_ring[0])
2201 return RREG32(mmCP_RB1_WPTR);
2202 else if (ring == &adev->gfx.compute_ring[1])
2203 return RREG32(mmCP_RB2_WPTR);
2204 else
2205 BUG();
2206 }
2207
2208 static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2209 {
2210 struct amdgpu_device *adev = ring->adev;
2211
2212 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2213 (void)RREG32(mmCP_RB0_WPTR);
2214 }
2215
2216 static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2217 {
2218 struct amdgpu_device *adev = ring->adev;
2219
2220 if (ring == &adev->gfx.compute_ring[0]) {
2221 WREG32(mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
2222 (void)RREG32(mmCP_RB1_WPTR);
2223 } else if (ring == &adev->gfx.compute_ring[1]) {
2224 WREG32(mmCP_RB2_WPTR, lower_32_bits(ring->wptr));
2225 (void)RREG32(mmCP_RB2_WPTR);
2226 } else {
2227 BUG();
2228 }
2229
2230 }
2231
2232 static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
2233 {
2234 struct amdgpu_ring *ring;
2235 u32 tmp;
2236 u32 rb_bufsz;
2237 int i, r;
2238 u64 rptr_addr;
2239
2240 /* ring1 - compute only */
2241 /* Set ring buffer size */
2242
2243 ring = &adev->gfx.compute_ring[0];
2244 rb_bufsz = order_base_2(ring->ring_size / 8);
2245 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2246 #ifdef __BIG_ENDIAN
2247 tmp |= BUF_SWAP_32BIT;
2248 #endif
2249 WREG32(mmCP_RB1_CNTL, tmp);
2250
2251 WREG32(mmCP_RB1_CNTL, tmp | CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK);
2252 ring->wptr = 0;
2253 WREG32(mmCP_RB1_WPTR, ring->wptr);
2254
2255 rptr_addr = ring->rptr_gpu_addr;
2256 WREG32(mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
2257 WREG32(mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2258
2259 mdelay(1);
2260 WREG32(mmCP_RB1_CNTL, tmp);
2261 WREG32(mmCP_RB1_BASE, ring->gpu_addr >> 8);
2262
2263 ring = &adev->gfx.compute_ring[1];
2264 rb_bufsz = order_base_2(ring->ring_size / 8);
2265 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2266 #ifdef __BIG_ENDIAN
2267 tmp |= BUF_SWAP_32BIT;
2268 #endif
2269 WREG32(mmCP_RB2_CNTL, tmp);
2270
2271 WREG32(mmCP_RB2_CNTL, tmp | CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK);
2272 ring->wptr = 0;
2273 WREG32(mmCP_RB2_WPTR, ring->wptr);
2274 rptr_addr = ring->rptr_gpu_addr;
2275 WREG32(mmCP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
2276 WREG32(mmCP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2277
2278 mdelay(1);
2279 WREG32(mmCP_RB2_CNTL, tmp);
2280 WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
2281
2282
2283 for (i = 0; i < 2; i++) {
2284 r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
2285 if (r)
2286 return r;
2287 }
2288
2289 return 0;
2290 }
2291
2292 static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
2293 {
2294 gfx_v6_0_cp_gfx_enable(adev, enable);
2295 }
2296
2297 static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
2298 {
2299 return gfx_v6_0_cp_gfx_load_microcode(adev);
2300 }
2301
2302 static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2303 bool enable)
2304 {
2305 u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
2306 u32 mask;
2307 int i;
2308
2309 if (enable)
2310 tmp |= (CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
2311 CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
2312 else
2313 tmp &= ~(CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
2314 CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
2315 WREG32(mmCP_INT_CNTL_RING0, tmp);
2316
2317 if (!enable) {
2318 /* read a gfx register */
2319 tmp = RREG32(mmDB_DEPTH_INFO);
2320
2321 mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
2322 for (i = 0; i < adev->usec_timeout; i++) {
2323 if ((RREG32(mmRLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
2324 break;
2325 udelay(1);
2326 }
2327 }
2328 }
2329
2330 static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
2331 {
2332 int r;
2333
2334 gfx_v6_0_enable_gui_idle_interrupt(adev, false);
2335
2336 r = gfx_v6_0_cp_load_microcode(adev);
2337 if (r)
2338 return r;
2339
2340 r = gfx_v6_0_cp_gfx_resume(adev);
2341 if (r)
2342 return r;
2343 r = gfx_v6_0_cp_compute_resume(adev);
2344 if (r)
2345 return r;
2346
2347 gfx_v6_0_enable_gui_idle_interrupt(adev, true);
2348
2349 return 0;
2350 }
2351
2352 static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2353 {
2354 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2355 uint32_t seq = ring->fence_drv.sync_seq;
2356 uint64_t addr = ring->fence_drv.gpu_addr;
2357
2358 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2359 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
2360 WAIT_REG_MEM_FUNCTION(3) | /* equal */
2361 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
2362 amdgpu_ring_write(ring, addr & 0xfffffffc);
2363 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
2364 amdgpu_ring_write(ring, seq);
2365 amdgpu_ring_write(ring, 0xffffffff);
2366 amdgpu_ring_write(ring, 4); /* poll interval */
2367
2368 if (usepfp) {
2369 		/* sync CE with ME to prevent the CE from fetching the CEIB before the context switch is done */
2370 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2371 amdgpu_ring_write(ring, 0);
2372 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2373 amdgpu_ring_write(ring, 0);
2374 }
2375 }
2376
2377 static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
2378 unsigned vmid, uint64_t pd_addr)
2379 {
2380 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2381
2382 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2383
2384 /* wait for the invalidate to complete */
2385 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2386 amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
2387 WAIT_REG_MEM_ENGINE(0))); /* me */
2388 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
2389 amdgpu_ring_write(ring, 0);
2390 amdgpu_ring_write(ring, 0); /* ref */
2391 amdgpu_ring_write(ring, 0); /* mask */
2392 amdgpu_ring_write(ring, 0x20); /* poll interval */
2393
2394 if (usepfp) {
2395 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2396 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2397 amdgpu_ring_write(ring, 0x0);
2398
2399 		/* sync CE with ME to prevent the CE from fetching the CEIB before the context switch is done */
2400 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2401 amdgpu_ring_write(ring, 0);
2402 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2403 amdgpu_ring_write(ring, 0);
2404 }
2405 }
2406
2407 static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
2408 uint32_t reg, uint32_t val)
2409 {
2410 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2411
2412 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2413 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
2414 WRITE_DATA_DST_SEL(0)));
2415 amdgpu_ring_write(ring, reg);
2416 amdgpu_ring_write(ring, 0);
2417 amdgpu_ring_write(ring, val);
2418 }
2419
2420 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
2421 {
2422 const u32 *src_ptr;
2423 u32 *dst_ptr;
2424 u32 dws;
2425 u64 reg_list_mc_addr;
2426 const struct cs_section_def *cs_data;
2427 int r;
2428
2429 adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
2430 adev->gfx.rlc.reg_list_size =
2431 (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
2432
2433 adev->gfx.rlc.cs_data = si_cs_data;
2434 src_ptr = adev->gfx.rlc.reg_list;
2435 dws = adev->gfx.rlc.reg_list_size;
2436 cs_data = adev->gfx.rlc.cs_data;
2437
2438 if (src_ptr) {
2439 /* init save restore block */
2440 r = amdgpu_gfx_rlc_init_sr(adev, dws);
2441 if (r)
2442 return r;
2443 }
2444
2445 if (cs_data) {
2446 /* clear state block */
2447 adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
2448 dws = adev->gfx.rlc.clear_state_size + (256 / 4);
2449
2450 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
2451 AMDGPU_GEM_DOMAIN_VRAM |
2452 AMDGPU_GEM_DOMAIN_GTT,
2453 &adev->gfx.rlc.clear_state_obj,
2454 &adev->gfx.rlc.clear_state_gpu_addr,
2455 (void **)&adev->gfx.rlc.cs_ptr);
2456 if (r) {
2457 			dev_warn(adev->dev, "(%d) create RLC clear state bo failed\n", r);
2458 amdgpu_gfx_rlc_fini(adev);
2459 return r;
2460 }
2461
2462 /* set up the cs buffer */
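		/*
		 * The first dwords of the BO hold the GPU address and size of
		 * the clear state data; the CSB contents themselves start 256
		 * bytes into the buffer.
		 */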
2463 dst_ptr = adev->gfx.rlc.cs_ptr;
2464 reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
2465 dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
2466 dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
2467 dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
2468 gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
2469 amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
2470 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
2471 }
2472
2473 return 0;
2474 }
2475
2476 static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
2477 {
2478 WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
2479
2480 if (!enable) {
2481 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2482 WREG32(mmSPI_LB_CU_MASK, 0x00ff);
2483 }
2484 }
2485
2486 static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2487 {
2488 int i;
2489
2490 for (i = 0; i < adev->usec_timeout; i++) {
2491 if (RREG32(mmRLC_SERDES_MASTER_BUSY_0) == 0)
2492 break;
2493 udelay(1);
2494 }
2495
2496 for (i = 0; i < adev->usec_timeout; i++) {
2497 if (RREG32(mmRLC_SERDES_MASTER_BUSY_1) == 0)
2498 break;
2499 udelay(1);
2500 }
2501 }
2502
2503 static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
2504 {
2505 u32 tmp;
2506
2507 tmp = RREG32(mmRLC_CNTL);
2508 if (tmp != rlc)
2509 WREG32(mmRLC_CNTL, rlc);
2510 }
2511
2512 static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
2513 {
2514 u32 data, orig;
2515
2516 orig = data = RREG32(mmRLC_CNTL);
2517
2518 if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
2519 data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
2520 WREG32(mmRLC_CNTL, data);
2521
2522 gfx_v6_0_wait_for_rlc_serdes(adev);
2523 }
2524
2525 return orig;
2526 }
2527
2528 static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
2529 {
2530 WREG32(mmRLC_CNTL, 0);
2531
2532 gfx_v6_0_enable_gui_idle_interrupt(adev, false);
2533 gfx_v6_0_wait_for_rlc_serdes(adev);
2534 }
2535
2536 static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
2537 {
2538 WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
2539
2540 gfx_v6_0_enable_gui_idle_interrupt(adev, true);
2541
2542 udelay(50);
2543 }
2544
2545 static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
2546 {
2547 WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2548 udelay(50);
2549 WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2550 udelay(50);
2551 }
2552
2553 static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
2554 {
2555 u32 tmp;
2556
2557 /* Enable LBPW only for DDR3 */
2558 tmp = RREG32(mmMC_SEQ_MISC0);
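	/* The top nibble of MC_SEQ_MISC0 encodes the memory type; 0xB indicates DDR3 */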
2559 if ((tmp & 0xF0000000) == 0xB0000000)
2560 return true;
2561 return false;
2562 }
2563
2564 static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
2565 {
2566 }
2567
2568 static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
2569 {
2570 u32 i;
2571 const struct rlc_firmware_header_v1_0 *hdr;
2572 const __le32 *fw_data;
2573 u32 fw_size;
2574
2575
2576 if (!adev->gfx.rlc_fw)
2577 return -EINVAL;
2578
2579 adev->gfx.rlc.funcs->stop(adev);
2580 adev->gfx.rlc.funcs->reset(adev);
2581 gfx_v6_0_init_pg(adev);
2582 gfx_v6_0_init_cg(adev);
2583
2584 WREG32(mmRLC_RL_BASE, 0);
2585 WREG32(mmRLC_RL_SIZE, 0);
2586 WREG32(mmRLC_LB_CNTL, 0);
2587 WREG32(mmRLC_LB_CNTR_MAX, 0xffffffff);
2588 WREG32(mmRLC_LB_CNTR_INIT, 0);
2589 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
2590
2591 WREG32(mmRLC_MC_CNTL, 0);
2592 WREG32(mmRLC_UCODE_CNTL, 0);
2593
2594 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
2595 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2596 fw_data = (const __le32 *)
2597 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2598
2599 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2600
2601 for (i = 0; i < fw_size; i++) {
2602 WREG32(mmRLC_UCODE_ADDR, i);
2603 WREG32(mmRLC_UCODE_DATA, le32_to_cpup(fw_data++));
2604 }
2605 WREG32(mmRLC_UCODE_ADDR, 0);
2606
2607 gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
2608 adev->gfx.rlc.funcs->start(adev);
2609
2610 return 0;
2611 }
2612
2613 static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
2614 {
2615 u32 data, orig, tmp;
2616
2617 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
2618
2619 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2620 gfx_v6_0_enable_gui_idle_interrupt(adev, true);
2621
2622 WREG32(mmRLC_GCPM_GENERAL_3, 0x00000080);
2623
2624 tmp = gfx_v6_0_halt_rlc(adev);
2625
2626 WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
2627 WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
2628 WREG32(mmRLC_SERDES_WR_CTRL, 0x00b000ff);
2629
2630 gfx_v6_0_wait_for_rlc_serdes(adev);
2631 gfx_v6_0_update_rlc(adev, tmp);
2632
2633 WREG32(mmRLC_SERDES_WR_CTRL, 0x007000ff);
2634
2635 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2636 } else {
2637 gfx_v6_0_enable_gui_idle_interrupt(adev, false);
2638
2639 RREG32(mmCB_CGTT_SCLK_CTRL);
2640 RREG32(mmCB_CGTT_SCLK_CTRL);
2641 RREG32(mmCB_CGTT_SCLK_CTRL);
2642 RREG32(mmCB_CGTT_SCLK_CTRL);
2643
2644 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2645 }
2646
2647 if (orig != data)
2648 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
2649
2650 }
2651
2652 static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
2653 {
2654
2655 u32 data, orig, tmp = 0;
2656
2657 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2658 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
2659 data = 0x96940200;
2660 if (orig != data)
2661 WREG32(mmCGTS_SM_CTRL_REG, data);
2662
2663 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2664 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
2665 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2666 if (orig != data)
2667 WREG32(mmCP_MEM_SLP_CNTL, data);
2668 }
2669
2670 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
2671 data &= 0xffffffc0;
2672 if (orig != data)
2673 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
2674
2675 tmp = gfx_v6_0_halt_rlc(adev);
2676
2677 WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
2678 WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
2679 WREG32(mmRLC_SERDES_WR_CTRL, 0x00d000ff);
2680
2681 gfx_v6_0_update_rlc(adev, tmp);
2682 } else {
2683 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
2684 data |= 0x00000003;
2685 if (orig != data)
2686 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
2687
2688 data = RREG32(mmCP_MEM_SLP_CNTL);
2689 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2690 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2691 WREG32(mmCP_MEM_SLP_CNTL, data);
2692 }
2693 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
2694 data |= CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK | CGTS_SM_CTRL_REG__OVERRIDE_MASK;
2695 if (orig != data)
2696 WREG32(mmCGTS_SM_CTRL_REG, data);
2697
2698 tmp = gfx_v6_0_halt_rlc(adev);
2699
2700 WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
2701 WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
2702 WREG32(mmRLC_SERDES_WR_CTRL, 0x00e000ff);
2703
2704 gfx_v6_0_update_rlc(adev, tmp);
2705 }
2706 }
2707 /*
2708 static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
2709 bool enable)
2710 {
2711 gfx_v6_0_enable_gui_idle_interrupt(adev, false);
2712 if (enable) {
2713 gfx_v6_0_enable_mgcg(adev, true);
2714 gfx_v6_0_enable_cgcg(adev, true);
2715 } else {
2716 gfx_v6_0_enable_cgcg(adev, false);
2717 gfx_v6_0_enable_mgcg(adev, false);
2718 }
2719 gfx_v6_0_enable_gui_idle_interrupt(adev, true);
2720 }
2721 */
2722
2723 static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
2724 bool enable)
2725 {
2726 }
2727
2728 static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
2729 bool enable)
2730 {
2731 }
2732
2733 static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
2734 {
2735 u32 data, orig;
2736
2737 orig = data = RREG32(mmRLC_PG_CNTL);
2738 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
2739 data &= ~0x8000;
2740 else
2741 data |= 0x8000;
2742 if (orig != data)
2743 WREG32(mmRLC_PG_CNTL, data);
2744 }
2745
2746 static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
2747 {
2748 }
2749 /*
2750 static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
2751 {
2752 const __le32 *fw_data;
2753 volatile u32 *dst_ptr;
2754 int me, i, max_me = 4;
2755 u32 bo_offset = 0;
2756 u32 table_offset, table_size;
2757
2758 if (adev->asic_type == CHIP_KAVERI)
2759 max_me = 5;
2760
2761 if (adev->gfx.rlc.cp_table_ptr == NULL)
2762 return;
2763
2764 dst_ptr = adev->gfx.rlc.cp_table_ptr;
2765 for (me = 0; me < max_me; me++) {
2766 if (me == 0) {
2767 const struct gfx_firmware_header_v1_0 *hdr =
2768 (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2769 fw_data = (const __le32 *)
2770 (adev->gfx.ce_fw->data +
2771 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2772 table_offset = le32_to_cpu(hdr->jt_offset);
2773 table_size = le32_to_cpu(hdr->jt_size);
2774 } else if (me == 1) {
2775 const struct gfx_firmware_header_v1_0 *hdr =
2776 (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2777 fw_data = (const __le32 *)
2778 (adev->gfx.pfp_fw->data +
2779 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2780 table_offset = le32_to_cpu(hdr->jt_offset);
2781 table_size = le32_to_cpu(hdr->jt_size);
2782 } else if (me == 2) {
2783 const struct gfx_firmware_header_v1_0 *hdr =
2784 (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2785 fw_data = (const __le32 *)
2786 (adev->gfx.me_fw->data +
2787 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2788 table_offset = le32_to_cpu(hdr->jt_offset);
2789 table_size = le32_to_cpu(hdr->jt_size);
2790 } else if (me == 3) {
2791 const struct gfx_firmware_header_v1_0 *hdr =
2792 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2793 fw_data = (const __le32 *)
2794 (adev->gfx.mec_fw->data +
2795 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2796 table_offset = le32_to_cpu(hdr->jt_offset);
2797 table_size = le32_to_cpu(hdr->jt_size);
2798 } else {
2799 const struct gfx_firmware_header_v1_0 *hdr =
2800 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2801 fw_data = (const __le32 *)
2802 (adev->gfx.mec2_fw->data +
2803 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2804 table_offset = le32_to_cpu(hdr->jt_offset);
2805 table_size = le32_to_cpu(hdr->jt_size);
2806 }
2807
2808 for (i = 0; i < table_size; i ++) {
2809 dst_ptr[bo_offset + i] =
2810 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
2811 }
2812
2813 bo_offset += table_size;
2814 }
2815 }
2816 */
2817 static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
2818 bool enable)
2819 {
2820 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
2821 WREG32(mmRLC_TTOP_D, RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10));
2822 WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, 1);
2823 WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 1);
2824 } else {
2825 WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 0);
2826 (void)RREG32(mmDB_RENDER_CONTROL);
2827 }
2828 }
2829
2830 static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
2831 {
2832 u32 tmp;
2833
2834 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
2835
2836 tmp = RREG32(mmRLC_MAX_PG_CU);
2837 tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
2838 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
2839 WREG32(mmRLC_MAX_PG_CU, tmp);
2840 }
2841
2842 static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
2843 bool enable)
2844 {
2845 u32 data, orig;
2846
2847 orig = data = RREG32(mmRLC_PG_CNTL);
2848 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
2849 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
2850 else
2851 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
2852 if (orig != data)
2853 WREG32(mmRLC_PG_CNTL, data);
2854 }
2855
2856 static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
2857 bool enable)
2858 {
2859 u32 data, orig;
2860
2861 orig = data = RREG32(mmRLC_PG_CNTL);
2862 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
2863 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
2864 else
2865 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
2866 if (orig != data)
2867 WREG32(mmRLC_PG_CNTL, data);
2868 }
2869
2870 static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
2871 {
2872 u32 tmp;
2873
2874 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
2875 WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_SRC, 1);
2876 WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
2877
2878 tmp = RREG32(mmRLC_AUTO_PG_CTRL);
2879 tmp &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2880 tmp |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2881 tmp &= ~RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK;
2882 WREG32(mmRLC_AUTO_PG_CTRL, tmp);
2883 }
2884
2885 static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
2886 {
2887 gfx_v6_0_enable_gfx_cgpg(adev, enable);
2888 gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
2889 gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
2890 }
2891
2892 static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
2893 {
2894 u32 count = 0;
2895 const struct cs_section_def *sect = NULL;
2896 const struct cs_extent_def *ext = NULL;
2897
2898 if (adev->gfx.rlc.cs_data == NULL)
2899 return 0;
2900
2901 /* begin clear state */
2902 count += 2;
2903 /* context control state */
2904 count += 3;
2905
2906 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2907 for (ext = sect->section; ext->extent != NULL; ++ext) {
2908 if (sect->id == SECT_CONTEXT)
2909 count += 2 + ext->reg_count;
2910 else
2911 return 0;
2912 }
2913 }
2914 /* pa_sc_raster_config */
2915 count += 3;
2916 /* end clear state */
2917 count += 2;
2918 /* clear state */
2919 count += 2;
2920
2921 return count;
2922 }
2923
2924 static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
2925 {
2926 u32 count = 0;
2927
2928 if (adev->gfx.rlc.cs_data == NULL)
2929 return;
2930 if (buffer == NULL)
2931 return;
2932
2933 count = amdgpu_gfx_csb_preamble_start(buffer);
2934 count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
2935
2936 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
2937 buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2938 buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
2939
2940 amdgpu_gfx_csb_preamble_end(buffer, count);
2941 }
2942
2943 static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
2944 {
2945 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2946 AMD_PG_SUPPORT_GFX_SMG |
2947 AMD_PG_SUPPORT_GFX_DMG |
2948 AMD_PG_SUPPORT_CP |
2949 AMD_PG_SUPPORT_GDS |
2950 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2951 gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
2952 gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
2953 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
2954 gfx_v6_0_init_gfx_cgpg(adev);
2955 gfx_v6_0_enable_cp_pg(adev, true);
2956 gfx_v6_0_enable_gds_pg(adev, true);
2957 } else {
2958 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
2959 WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
2960
2961 }
2962 gfx_v6_0_init_ao_cu_mask(adev);
2963 gfx_v6_0_update_gfx_pg(adev, true);
2964 } else {
2965
2966 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
2967 WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
2968 }
2969 }
2970
2971 static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
2972 {
2973 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2974 AMD_PG_SUPPORT_GFX_SMG |
2975 AMD_PG_SUPPORT_GFX_DMG |
2976 AMD_PG_SUPPORT_CP |
2977 AMD_PG_SUPPORT_GDS |
2978 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2979 gfx_v6_0_update_gfx_pg(adev, false);
2980 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
2981 gfx_v6_0_enable_cp_pg(adev, false);
2982 gfx_v6_0_enable_gds_pg(adev, false);
2983 }
2984 }
2985 }
2986
2987 static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2988 {
2989 uint64_t clock;
2990
2991 mutex_lock(&adev->gfx.gpu_clock_mutex);
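	/* Writing the capture register samples the counter; the mutex keeps the LSB/MSB reads paired with that sample */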
2992 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
2993 clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
2994 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
2995 mutex_unlock(&adev->gfx.gpu_clock_mutex);
2996 return clock;
2997 }
2998
2999 static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
3000 {
3001 if (flags & AMDGPU_HAVE_CTX_SWITCH)
3002 gfx_v6_0_ring_emit_vgt_flush(ring);
3003 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3004 amdgpu_ring_write(ring, 0x80000000);
3005 amdgpu_ring_write(ring, 0);
3006 }
3007
3008
3009 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
3010 {
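	/* Per-wave state is read through the SQ indirect index/data register pair */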
3011 WREG32(mmSQ_IND_INDEX,
3012 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
3013 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
3014 (address << SQ_IND_INDEX__INDEX__SHIFT) |
3015 (SQ_IND_INDEX__FORCE_READ_MASK));
3016 return RREG32(mmSQ_IND_DATA);
3017 }
3018
3019 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
3020 uint32_t wave, uint32_t thread,
3021 uint32_t regno, uint32_t num, uint32_t *out)
3022 {
3023 WREG32(mmSQ_IND_INDEX,
3024 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
3025 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
3026 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
3027 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
3028 (SQ_IND_INDEX__FORCE_READ_MASK) |
3029 (SQ_IND_INDEX__AUTO_INCR_MASK));
3030 while (num--)
3031 *(out++) = RREG32(mmSQ_IND_DATA);
3032 }
3033
3034 static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
3035 {
3036 /* type 0 wave data */
3037 dst[(*no_fields)++] = 0;
3038 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
3039 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
3040 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
3041 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
3042 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
3043 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
3044 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
3045 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
3046 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
3047 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
3048 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
3049 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
3050 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
3051 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
3052 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
3053 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
3054 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
3055 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
3056 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
3057 }
3058
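/* Copy @size SGPRs starting at @start for the selected wave. */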
3059 static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
3060 uint32_t wave, uint32_t start,
3061 uint32_t size, uint32_t *dst)
3062 {
3063 wave_read_regs(
3064 adev, simd, wave, 0,
3065 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
3066 }
3067
3068 static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
3069 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
3070 {
3071 drm_info(adev_to_drm(adev), "Not implemented\n");
3072 }
3073
3074 static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
3075 .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
3076 .select_se_sh = &gfx_v6_0_select_se_sh,
3077 .read_wave_data = &gfx_v6_0_read_wave_data,
3078 .read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
3079 .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
3080 };
3081
3082 static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
3083 .init = gfx_v6_0_rlc_init,
3084 .resume = gfx_v6_0_rlc_resume,
3085 .stop = gfx_v6_0_rlc_stop,
3086 .reset = gfx_v6_0_rlc_reset,
3087 .start = gfx_v6_0_rlc_start
3088 };
3089
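/* Set the ring counts and hook up the gfx, RLC, ring and IRQ function
 * tables before the sw_init stage runs.
 */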
3090 static int gfx_v6_0_early_init(struct amdgpu_ip_block *ip_block)
3091 {
3092 struct amdgpu_device *adev = ip_block->adev;
3093
3094 adev->gfx.xcc_mask = 1;
3095 adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
3096 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
3097 GFX6_NUM_COMPUTE_RINGS);
3098 adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
3099 adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
3100 gfx_v6_0_set_ring_funcs(adev);
3101 gfx_v6_0_set_irq_funcs(adev);
3102
3103 return 0;
3104 }
3105
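/* Register the CP interrupt sources on the legacy IH client (src 181 for
 * end-of-pipe, 184 for privileged register and 185 for privileged
 * instruction faults), load the microcode, set up the RLC objects and
 * initialize the gfx and compute rings.
 */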
3106 static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
3107 {
3108 struct amdgpu_ring *ring;
3109 struct amdgpu_device *adev = ip_block->adev;
3110 int i, r;
3111
3112 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
3113 if (r)
3114 return r;
3115
3116 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
3117 if (r)
3118 return r;
3119
3120 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
3121 if (r)
3122 return r;
3123
3124 r = gfx_v6_0_init_microcode(adev);
3125 if (r) {
3126 DRM_ERROR("Failed to load gfx firmware!\n");
3127 return r;
3128 }
3129
3130 r = adev->gfx.rlc.funcs->init(adev);
3131 if (r) {
3132 DRM_ERROR("Failed to init rlc BOs!\n");
3133 return r;
3134 }
3135
3136 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3137 ring = &adev->gfx.gfx_ring[i];
3138 ring->ring_obj = NULL;
3139 sprintf(ring->name, "gfx");
3140 r = amdgpu_ring_init(adev, ring, 2048,
3141 &adev->gfx.eop_irq,
3142 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
3143 AMDGPU_RING_PRIO_DEFAULT, NULL);
3144 if (r)
3145 return r;
3146 }
3147
3148 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3149 unsigned irq_type;
3150
3151 if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
3152 DRM_ERROR("Too many (%d) compute rings!\n", i);
3153 break;
3154 }
3155 ring = &adev->gfx.compute_ring[i];
3156 ring->ring_obj = NULL;
3157 ring->use_doorbell = false;
3158 ring->doorbell_index = 0;
3159 ring->me = 1;
3160 ring->pipe = i;
3161 ring->queue = i;
3162 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
3163 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
3164 r = amdgpu_ring_init(adev, ring, 1024,
3165 &adev->gfx.eop_irq, irq_type,
3166 AMDGPU_RING_PRIO_DEFAULT, NULL);
3167 if (r)
3168 return r;
3169 }
3170
3171 adev->gfx.gfx_supported_reset =
3172 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
3173 adev->gfx.compute_supported_reset =
3174 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
3175
3176 return r;
3177 }
3178
3179 static int gfx_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
3180 {
3181 int i;
3182 struct amdgpu_device *adev = ip_block->adev;
3183
3184 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3185 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
3186 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3187 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
3188
3189 amdgpu_gfx_rlc_fini(adev);
3190
3191 return 0;
3192 }
3193
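/* Program the static GFX constants, then bring up the RLC and the CP
 * rings.
 */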
3194 static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
3195 {
3196 int r;
3197 struct amdgpu_device *adev = ip_block->adev;
3198
3199 gfx_v6_0_constants_init(adev);
3200
3201 r = adev->gfx.rlc.funcs->resume(adev);
3202 if (r)
3203 return r;
3204
3205 r = gfx_v6_0_cp_resume(adev);
3206 if (r)
3207 return r;
3208
3209 adev->gfx.ce_ram_size = 0x8000;
3210
3211 return r;
3212 }
3213
3214 static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
3215 {
3216 struct amdgpu_device *adev = ip_block->adev;
3217
3218 gfx_v6_0_cp_enable(adev, false);
3219 adev->gfx.rlc.funcs->stop(adev);
3220 gfx_v6_0_fini_pg(adev);
3221
3222 return 0;
3223 }
3224
3225 static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block)
3226 {
3227 return gfx_v6_0_hw_fini(ip_block);
3228 }
3229
3230 static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
3231 {
3232 return gfx_v6_0_hw_init(ip_block);
3233 }
3234
3235 static bool gfx_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
3236 {
3237 struct amdgpu_device *adev = ip_block->adev;
3238
3239 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
3240 return false;
3241 else
3242 return true;
3243 }
3244
3245 static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
3246 {
3247 unsigned i;
3248 struct amdgpu_device *adev = ip_block->adev;
3249
3250 for (i = 0; i < adev->usec_timeout; i++) {
3251 if (gfx_v6_0_is_idle(ip_block))
3252 return 0;
3253 udelay(1);
3254 }
3255 return -ETIMEDOUT;
3256 }
3257
3258 static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
3259 enum amdgpu_interrupt_state state)
3260 {
3261 u32 cp_int_cntl;
3262
3263 switch (state) {
3264 case AMDGPU_IRQ_STATE_DISABLE:
3265 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
3266 cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
3267 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
3268 break;
3269 case AMDGPU_IRQ_STATE_ENABLE:
3270 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
3271 cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
3272 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
3273 break;
3274 default:
3275 break;
3276 }
3277 }
3278
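/* Compute rings use CP_INT_CNTL_RING1/RING2 (RING0 belongs to the gfx
 * ring): ring index 0 maps to RING1 and ring index 1 to RING2.
 */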
3279 static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
3280 int ring,
3281 enum amdgpu_interrupt_state state)
3282 {
3283 u32 cp_int_cntl;
3284 switch (state) {
3285 case AMDGPU_IRQ_STATE_DISABLE:
3286 if (ring == 0) {
3287 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
3288 cp_int_cntl &= ~CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
3289 WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
3290 break;
3291 } else {
3292 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
3293 cp_int_cntl &= ~CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
3294 WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
3295 break;
3296
3297 }
3298 case AMDGPU_IRQ_STATE_ENABLE:
3299 if (ring == 0) {
3300 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
3301 cp_int_cntl |= CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
3302 WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
3303 break;
3304 } else {
3305 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
3306 cp_int_cntl |= CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
3307 WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
3308 break;
3309
3310 }
3311
3312 default:
3313 BUG();
3314 break;
3315
3316 }
3317 }
3318
3319 static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
3320 struct amdgpu_irq_src *src,
3321 unsigned type,
3322 enum amdgpu_interrupt_state state)
3323 {
3324 u32 cp_int_cntl;
3325
3326 switch (state) {
3327 case AMDGPU_IRQ_STATE_DISABLE:
3328 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
3329 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
3330 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
3331 break;
3332 case AMDGPU_IRQ_STATE_ENABLE:
3333 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
3334 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
3335 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
3336 break;
3337 default:
3338 break;
3339 }
3340
3341 return 0;
3342 }
3343
3344 static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
3345 struct amdgpu_irq_src *src,
3346 unsigned type,
3347 enum amdgpu_interrupt_state state)
3348 {
3349 u32 cp_int_cntl;
3350
3351 switch (state) {
3352 case AMDGPU_IRQ_STATE_DISABLE:
3353 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
3354 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
3355 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
3356 break;
3357 case AMDGPU_IRQ_STATE_ENABLE:
3358 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
3359 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
3360 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
3361 break;
3362 default:
3363 break;
3364 }
3365
3366 return 0;
3367 }
3368
3369 static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
3370 struct amdgpu_irq_src *src,
3371 unsigned type,
3372 enum amdgpu_interrupt_state state)
3373 {
3374 switch (type) {
3375 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
3376 gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
3377 break;
3378 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3379 gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
3380 break;
3381 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3382 gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
3383 break;
3384 default:
3385 break;
3386 }
3387 return 0;
3388 }
3389
3390 static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
3391 struct amdgpu_irq_src *source,
3392 struct amdgpu_iv_entry *entry)
3393 {
3394 switch (entry->ring_id) {
3395 case 0:
3396 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
3397 break;
3398 case 1:
3399 case 2:
3400 amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
3401 break;
3402 default:
3403 break;
3404 }
3405 return 0;
3406 }
3407
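/* Forward a CP fault interrupt to the scheduler of the ring that raised it
 * so the offending job can be handled.
 */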
3408 static void gfx_v6_0_fault(struct amdgpu_device *adev,
3409 struct amdgpu_iv_entry *entry)
3410 {
3411 struct amdgpu_ring *ring;
3412
3413 switch (entry->ring_id) {
3414 case 0:
3415 ring = &adev->gfx.gfx_ring[0];
3416 break;
3417 case 1:
3418 case 2:
3419 ring = &adev->gfx.compute_ring[entry->ring_id - 1];
3420 break;
3421 default:
3422 return;
3423 }
3424 drm_sched_fault(&ring->sched);
3425 }
3426
3427 static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
3428 struct amdgpu_irq_src *source,
3429 struct amdgpu_iv_entry *entry)
3430 {
3431 DRM_ERROR("Illegal register access in command stream\n");
3432 gfx_v6_0_fault(adev, entry);
3433 return 0;
3434 }
3435
3436 static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
3437 struct amdgpu_irq_src *source,
3438 struct amdgpu_iv_entry *entry)
3439 {
3440 DRM_ERROR("Illegal instruction in command stream\n");
3441 gfx_v6_0_fault(adev, entry);
3442 return 0;
3443 }
3444
3445 static int gfx_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3446 enum amd_clockgating_state state)
3447 {
3448 bool gate = false;
3449 struct amdgpu_device *adev = ip_block->adev;
3450
3451 if (state == AMD_CG_STATE_GATE)
3452 gate = true;
3453
3454 gfx_v6_0_enable_gui_idle_interrupt(adev, false);
3455 if (gate) {
3456 gfx_v6_0_enable_mgcg(adev, true);
3457 gfx_v6_0_enable_cgcg(adev, true);
3458 } else {
3459 gfx_v6_0_enable_cgcg(adev, false);
3460 gfx_v6_0_enable_mgcg(adev, false);
3461 }
3462 gfx_v6_0_enable_gui_idle_interrupt(adev, true);
3463
3464 return 0;
3465 }
3466
3467 static int gfx_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
3468 enum amd_powergating_state state)
3469 {
3470 bool gate = false;
3471 struct amdgpu_device *adev = ip_block->adev;
3472
3473 if (state == AMD_PG_STATE_GATE)
3474 gate = true;
3475
3476 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3477 AMD_PG_SUPPORT_GFX_SMG |
3478 AMD_PG_SUPPORT_GFX_DMG |
3479 AMD_PG_SUPPORT_CP |
3480 AMD_PG_SUPPORT_GDS |
3481 AMD_PG_SUPPORT_RLC_SMU_HS)) {
3482 gfx_v6_0_update_gfx_pg(adev, gate);
3483 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
3484 gfx_v6_0_enable_cp_pg(adev, gate);
3485 gfx_v6_0_enable_gds_pg(adev, gate);
3486 }
3487 }
3488
3489 return 0;
3490 }
3491
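/* Emit a SURFACE_SYNC that flushes the TC, TCL1, K$ and I$ caches over the
 * full address range.
 */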
3492 static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
3493 {
3494 amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
3495 amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
3496 PACKET3_TC_ACTION_ENA |
3497 PACKET3_SH_KCACHE_ACTION_ENA |
3498 PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
3499 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
3500 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3501 amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
3502 }
3503
3504 static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
3505 .name = "gfx_v6_0",
3506 .early_init = gfx_v6_0_early_init,
3507 .sw_init = gfx_v6_0_sw_init,
3508 .sw_fini = gfx_v6_0_sw_fini,
3509 .hw_init = gfx_v6_0_hw_init,
3510 .hw_fini = gfx_v6_0_hw_fini,
3511 .suspend = gfx_v6_0_suspend,
3512 .resume = gfx_v6_0_resume,
3513 .is_idle = gfx_v6_0_is_idle,
3514 .wait_for_idle = gfx_v6_0_wait_for_idle,
3515 .set_clockgating_state = gfx_v6_0_set_clockgating_state,
3516 .set_powergating_state = gfx_v6_0_set_powergating_state,
3517 };
3518
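/* emit_frame_size below is the worst-case number of ring dwords of
 * per-submission overhead (fences, flushes, syncs) used to size the
 * reservation for each frame.
 */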
3519 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
3520 .type = AMDGPU_RING_TYPE_GFX,
3521 .align_mask = 0xff,
3522 .nop = 0x80000000,
3523 .support_64bit_ptrs = false,
3524 .get_rptr = gfx_v6_0_ring_get_rptr,
3525 .get_wptr = gfx_v6_0_ring_get_wptr,
3526 .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
3527 .emit_frame_size =
3528 5 + 5 + /* hdp flush / invalidate */
3529 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
3530 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
3531 SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
3532 3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
3533 5, /* SURFACE_SYNC */
3534 .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
3535 .emit_ib = gfx_v6_0_ring_emit_ib,
3536 .emit_fence = gfx_v6_0_ring_emit_fence,
3537 .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
3538 .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
3539 .test_ring = gfx_v6_0_ring_test_ring,
3540 .test_ib = gfx_v6_0_ring_test_ib,
3541 .insert_nop = amdgpu_ring_insert_nop,
3542 .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
3543 .emit_wreg = gfx_v6_0_ring_emit_wreg,
3544 .emit_mem_sync = gfx_v6_0_emit_mem_sync,
3545 };
3546
3547 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
3548 .type = AMDGPU_RING_TYPE_COMPUTE,
3549 .align_mask = 0xff,
3550 .nop = 0x80000000,
3551 .get_rptr = gfx_v6_0_ring_get_rptr,
3552 .get_wptr = gfx_v6_0_ring_get_wptr,
3553 .set_wptr = gfx_v6_0_ring_set_wptr_compute,
3554 .emit_frame_size =
3555 5 + 5 + /* hdp flush / invalidate */
3556 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
3557 SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
3558 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
3559 5, /* SURFACE_SYNC */
3560 .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
3561 .emit_ib = gfx_v6_0_ring_emit_ib,
3562 .emit_fence = gfx_v6_0_ring_emit_fence,
3563 .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
3564 .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
3565 .test_ring = gfx_v6_0_ring_test_ring,
3566 .test_ib = gfx_v6_0_ring_test_ib,
3567 .insert_nop = amdgpu_ring_insert_nop,
3568 .emit_wreg = gfx_v6_0_ring_emit_wreg,
3569 .emit_mem_sync = gfx_v6_0_emit_mem_sync,
3570 };
3571
3572 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
3573 {
3574 int i;
3575
3576 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3577 adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
3578 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3579 adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
3580 }
3581
3582 static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
3583 .set = gfx_v6_0_set_eop_interrupt_state,
3584 .process = gfx_v6_0_eop_irq,
3585 };
3586
3587 static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
3588 .set = gfx_v6_0_set_priv_reg_fault_state,
3589 .process = gfx_v6_0_priv_reg_irq,
3590 };
3591
3592 static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
3593 .set = gfx_v6_0_set_priv_inst_fault_state,
3594 .process = gfx_v6_0_priv_inst_irq,
3595 };
3596
3597 static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3598 {
3599 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
3600 adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
3601
3602 adev->gfx.priv_reg_irq.num_types = 1;
3603 adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
3604
3605 adev->gfx.priv_inst_irq.num_types = 1;
3606 adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
3607 }
3608
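/* Walk every shader engine/array, record the enabled-CU bitmap per SH and
 * derive the always-on CU mask, capping the always-on CUs at ao_cu_num
 * (2 on APUs, max_cu_per_sh otherwise).
 */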
3609 static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
3610 {
3611 int i, j, k, counter, active_cu_number = 0;
3612 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
3613 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
3614 unsigned disable_masks[4 * 2];
3615 u32 ao_cu_num;
3616
3617 if (adev->flags & AMD_IS_APU)
3618 ao_cu_num = 2;
3619 else
3620 ao_cu_num = adev->gfx.config.max_cu_per_sh;
3621
3622 memset(cu_info, 0, sizeof(*cu_info));
3623
3624 amdgpu_gfx_parse_disable_cu(adev, disable_masks, 4, 2);
3625
3626 mutex_lock(&adev->grbm_idx_mutex);
3627 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3628 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3629 mask = 1;
3630 ao_bitmap = 0;
3631 counter = 0;
3632 gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
3633 if (i < 4 && j < 2)
3634 gfx_v6_0_set_user_cu_inactive_bitmap(
3635 adev, disable_masks[i * 2 + j]);
3636 bitmap = gfx_v6_0_get_cu_enabled(adev);
3637 cu_info->bitmap[0][i][j] = bitmap;
3638
3639 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3640 if (bitmap & mask) {
3641 if (counter < ao_cu_num)
3642 ao_bitmap |= mask;
3643 counter++;
3644 }
3645 mask <<= 1;
3646 }
3647 active_cu_number += counter;
3648 if (i < 2 && j < 2)
3649 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
3650 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
3651 }
3652 }
3653
3654 gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
3655 mutex_unlock(&adev->grbm_idx_mutex);
3656
3657 cu_info->number = active_cu_number;
3658 cu_info->ao_cu_mask = ao_cu_mask;
3659 }
3660
3661 const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
3662 {
3663 .type = AMD_IP_BLOCK_TYPE_GFX,
3664 .major = 6,
3665 .minor = 0,
3666 .rev = 0,
3667 .funcs = &gfx_v6_0_ip_funcs,
3668 };
3669