1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27
28 #include <drm/drm_cache.h>
29 #include "amdgpu.h"
30 #include "gmc_v8_0.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_amdkfd.h"
33 #include "amdgpu_gem.h"
34
35 #include "gmc/gmc_8_1_d.h"
36 #include "gmc/gmc_8_1_sh_mask.h"
37
38 #include "bif/bif_5_0_d.h"
39 #include "bif/bif_5_0_sh_mask.h"
40
41 #include "oss/oss_3_0_d.h"
42 #include "oss/oss_3_0_sh_mask.h"
43
44 #include "dce/dce_10_0_d.h"
45 #include "dce/dce_10_0_sh_mask.h"
46
47 #include "vid.h"
48 #include "vi.h"
49
50 #include "amdgpu_atombios.h"
51
52 #include "ivsrcid/ivsrcid_vislands30.h"
53
54 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
55 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
56 static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
57
58 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
59 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
60 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
61 MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
62 MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
63 MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
64 MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
65 MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
66
67 static const u32 golden_settings_tonga_a11[] = {
68 mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
69 mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
70 mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
71 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
72 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
73 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
74 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
75 };
76
77 static const u32 tonga_mgcg_cgcg_init[] = {
78 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
79 };
80
81 static const u32 golden_settings_fiji_a10[] = {
82 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
83 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
84 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
85 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
86 };
87
88 static const u32 fiji_mgcg_cgcg_init[] = {
89 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
90 };
91
92 static const u32 golden_settings_polaris11_a11[] = {
93 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
94 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
95 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
96 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
97 };
98
99 static const u32 golden_settings_polaris10_a11[] = {
100 mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
101 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
102 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
103 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
104 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
105 };
106
107 static const u32 cz_mgcg_cgcg_init[] = {
108 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
109 };
110
111 static const u32 stoney_mgcg_cgcg_init[] = {
112 mmATC_MISC_CG, 0xffffffff, 0x000c0200,
113 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
114 };
115
116 static const u32 golden_settings_stoney_common[] = {
117 mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
118 mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
119 };
120
121 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
122 {
123 switch (adev->asic_type) {
124 case CHIP_FIJI:
125 amdgpu_device_program_register_sequence(adev,
126 fiji_mgcg_cgcg_init,
127 ARRAY_SIZE(fiji_mgcg_cgcg_init));
128 amdgpu_device_program_register_sequence(adev,
129 golden_settings_fiji_a10,
130 ARRAY_SIZE(golden_settings_fiji_a10));
131 break;
132 case CHIP_TONGA:
133 amdgpu_device_program_register_sequence(adev,
134 tonga_mgcg_cgcg_init,
135 ARRAY_SIZE(tonga_mgcg_cgcg_init));
136 amdgpu_device_program_register_sequence(adev,
137 golden_settings_tonga_a11,
138 ARRAY_SIZE(golden_settings_tonga_a11));
139 break;
140 case CHIP_POLARIS11:
141 case CHIP_POLARIS12:
142 case CHIP_VEGAM:
143 amdgpu_device_program_register_sequence(adev,
144 golden_settings_polaris11_a11,
145 ARRAY_SIZE(golden_settings_polaris11_a11));
146 break;
147 case CHIP_POLARIS10:
148 amdgpu_device_program_register_sequence(adev,
149 golden_settings_polaris10_a11,
150 ARRAY_SIZE(golden_settings_polaris10_a11));
151 break;
152 case CHIP_CARRIZO:
153 amdgpu_device_program_register_sequence(adev,
154 cz_mgcg_cgcg_init,
155 ARRAY_SIZE(cz_mgcg_cgcg_init));
156 break;
157 case CHIP_STONEY:
158 amdgpu_device_program_register_sequence(adev,
159 stoney_mgcg_cgcg_init,
160 ARRAY_SIZE(stoney_mgcg_cgcg_init));
161 amdgpu_device_program_register_sequence(adev,
162 golden_settings_stoney_common,
163 ARRAY_SIZE(golden_settings_stoney_common));
164 break;
165 default:
166 break;
167 }
168 }
169
170 static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
171 {
172 u32 blackout;
173 struct amdgpu_ip_block *ip_block;
174
175 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
176 if (!ip_block)
177 return;
178
179 gmc_v8_0_wait_for_idle(ip_block);
180
181 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
182 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
183 /* Block CPU access */
184 WREG32(mmBIF_FB_EN, 0);
185 /* blackout the MC */
186 blackout = REG_SET_FIELD(blackout,
187 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
188 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
189 }
190 /* wait for the MC to settle */
191 udelay(100);
192 }
193
194 static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
195 {
196 u32 tmp;
197
198 /* unblackout the MC */
199 tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
200 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
201 WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
202 /* allow CPU access */
203 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
204 tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
205 WREG32(mmBIF_FB_EN, tmp);
206 }
207
208 /**
209 * gmc_v8_0_init_microcode - load ucode images from disk
210 *
211 * @adev: amdgpu_device pointer
212 *
213 * Use the firmware interface to load the ucode images into
214 * the driver (not loaded into hw).
215 * Returns 0 on success, error on failure.
216 */
217 static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
218 {
219 const char *chip_name;
220 int err;
221
222 DRM_DEBUG("\n");
223
224 switch (adev->asic_type) {
225 case CHIP_TONGA:
226 chip_name = "tonga";
227 break;
228 case CHIP_POLARIS11:
229 if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
230 ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
231 chip_name = "polaris11_k";
232 else
233 chip_name = "polaris11";
234 break;
235 case CHIP_POLARIS10:
236 if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
237 chip_name = "polaris10_k";
238 else
239 chip_name = "polaris10";
240 break;
241 case CHIP_POLARIS12:
242 if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
243 chip_name = "polaris12_k";
244 } else {
245 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
246 /* Polaris12 32bit ASIC needs a special MC firmware */
247 if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
248 chip_name = "polaris12_32";
249 else
250 chip_name = "polaris12";
251 }
252 break;
253 case CHIP_FIJI:
254 case CHIP_CARRIZO:
255 case CHIP_STONEY:
256 case CHIP_VEGAM:
257 return 0;
258 default:
259 return -EINVAL;
260 }
261
262 err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
263 "amdgpu/%s_mc.bin", chip_name);
264 if (err) {
265 pr_err("mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
266 amdgpu_ucode_release(&adev->gmc.fw);
267 }
268 return err;
269 }
270
271 /**
272 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
273 *
274 * @adev: amdgpu_device pointer
275 *
276 * Load the GDDR MC ucode into the hw (VI).
277 * Returns 0 on success, error on failure.
278 */
279 static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
280 {
281 const struct mc_firmware_header_v1_0 *hdr;
282 const __le32 *fw_data = NULL;
283 const __le32 *io_mc_regs = NULL;
284 u32 running;
285 int i, ucode_size, regs_size;
286
287 /* Skip MC ucode loading on SR-IOV capable boards.
288 * vbios does this for us in asic_init in that case.
289 * Skip MC ucode loading on VF, because the hypervisor will do that
290 * for this adapter.
291 */
292 if (amdgpu_sriov_bios(adev))
293 return 0;
294
295 if (!adev->gmc.fw)
296 return -EINVAL;
297
298 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
299 amdgpu_ucode_print_mc_hdr(&hdr->header);
300
301 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
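/* each io_debug entry is an (index, data) pair of 32-bit words,
 * hence the division by (4 * 2)
 */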
302 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
303 io_mc_regs = (const __le32 *)
304 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
305 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
306 fw_data = (const __le32 *)
307 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
308
309 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
310
311 if (running == 0) {
312 /* reset the engine and set to writable */
313 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
314 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
315
316 /* load mc io regs */
317 for (i = 0; i < regs_size; i++) {
318 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
319 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
320 }
321 /* load the MC ucode */
322 for (i = 0; i < ucode_size; i++)
323 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
324
325 /* put the engine back into the active state */
326 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
327 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
328 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
329
330 /* wait for training to complete */
331 for (i = 0; i < adev->usec_timeout; i++) {
332 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
333 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
334 break;
335 udelay(1);
336 }
337 for (i = 0; i < adev->usec_timeout; i++) {
338 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
339 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
340 break;
341 udelay(1);
342 }
343 }
344
345 return 0;
346 }
347
348 static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
349 {
350 const struct mc_firmware_header_v1_0 *hdr;
351 const __le32 *fw_data = NULL;
352 const __le32 *io_mc_regs = NULL;
353 u32 data;
354 int i, ucode_size, regs_size;
355
356 /* Skip MC ucode loading on SR-IOV capable boards.
357 * vbios does this for us in asic_init in that case.
358 * Skip MC ucode loading on VF, because the hypervisor will do that
359 * for this adapter.
360 */
361 if (amdgpu_sriov_bios(adev))
362 return 0;
363
364 if (!adev->gmc.fw)
365 return -EINVAL;
366
367 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
368 amdgpu_ucode_print_mc_hdr(&hdr->header);
369
370 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
371 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
372 io_mc_regs = (const __le32 *)
373 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
374 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
375 fw_data = (const __le32 *)
376 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
377
378 data = RREG32(mmMC_SEQ_MISC0);
379 data &= ~(0x40);
380 WREG32(mmMC_SEQ_MISC0, data);
381
382 /* load mc io regs */
383 for (i = 0; i < regs_size; i++) {
384 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
385 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
386 }
387
388 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
389 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
390
391 /* load the MC ucode */
392 for (i = 0; i < ucode_size; i++)
393 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
394
395 /* put the engine back into the active state */
396 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
397 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
398 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
399
400 /* wait for training to complete */
401 for (i = 0; i < adev->usec_timeout; i++) {
402 data = RREG32(mmMC_SEQ_MISC0);
403 if (data & 0x80)
404 break;
405 udelay(1);
406 }
407
408 return 0;
409 }
410
411 static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
412 struct amdgpu_gmc *mc)
413 {
414 u64 base = 0;
415
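/* MC_VM_FB_LOCATION holds bits 39:24 of the FB base address,
 * i.e. the base in 16MB units
 */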
416 if (!amdgpu_sriov_vf(adev))
417 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
418 base <<= 24;
419
420 amdgpu_gmc_set_agp_default(adev, mc);
421 amdgpu_gmc_vram_location(adev, mc, base);
422 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
423 }
424
425 /**
426 * gmc_v8_0_mc_program - program the GPU memory controller
427 *
428 * @adev: amdgpu_device pointer
429 *
430 * Set the location of vram, gart, and AGP in the GPU's
431 * physical address space (VI).
432 */
433 static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
434 {
435 struct amdgpu_ip_block *ip_block;
436 u32 tmp;
437 int i, j;
438
439 /* Initialize HDP */
440 for (i = 0, j = 0; i < 32; i++, j += 0x6) {
441 WREG32((0xb05 + j), 0x00000000);
442 WREG32((0xb06 + j), 0x00000000);
443 WREG32((0xb07 + j), 0x00000000);
444 WREG32((0xb08 + j), 0x00000000);
445 WREG32((0xb09 + j), 0x00000000);
446 }
447 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
448
449 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
450 if (!ip_block)
451 return;
452
453 if (gmc_v8_0_wait_for_idle(ip_block))
454 dev_warn(adev->dev, "Wait for MC idle timed out!\n");
455
456 if (adev->mode_info.num_crtc) {
457 /* Lockout access through VGA aperture*/
458 tmp = RREG32(mmVGA_HDP_CONTROL);
459 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
460 WREG32(mmVGA_HDP_CONTROL, tmp);
461
462 /* disable VGA render */
463 tmp = RREG32(mmVGA_RENDER_CONTROL);
464 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
465 WREG32(mmVGA_RENDER_CONTROL, tmp);
466 }
467 /* Update configuration */
468 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
469 adev->gmc.vram_start >> 12);
470 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
471 adev->gmc.vram_end >> 12);
472 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
473 adev->mem_scratch.gpu_addr >> 12);
474
475 if (amdgpu_sriov_vf(adev)) {
476 tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
477 tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
478 WREG32(mmMC_VM_FB_LOCATION, tmp);
479 /* XXX double check these! */
480 WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
481 WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
482 WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
483 }
484
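/* the AGP aperture bounds are programmed in 4MB units (address >> 22) */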
485 WREG32(mmMC_VM_AGP_BASE, 0);
486 WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
487 WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
488 if (gmc_v8_0_wait_for_idle(ip_block))
489 dev_warn(adev->dev, "Wait for MC idle timed out!\n");
490
491 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
492
493 tmp = RREG32(mmHDP_MISC_CNTL);
494 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
495 WREG32(mmHDP_MISC_CNTL, tmp);
496
497 tmp = RREG32(mmHDP_HOST_PATH_CNTL);
498 WREG32(mmHDP_HOST_PATH_CNTL, tmp);
499 }
500
501 /**
502 * gmc_v8_0_mc_init - initialize the memory controller driver params
503 *
504 * @adev: amdgpu_device pointer
505 *
506 * Look up the amount of vram, vram width, and decide how to place
507 * vram and gart within the GPU's physical address space (VI).
508 * Returns 0 for success.
509 */
510 static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
511 {
512 int r;
513 u32 tmp;
514
515 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
516 if (!adev->gmc.vram_width) {
517 int chansize, numchan;
518
519 /* Get VRAM information */
520 tmp = RREG32(mmMC_ARB_RAMCFG);
521 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
522 chansize = 64;
523 else
524 chansize = 32;
525
526 tmp = RREG32(mmMC_SHARED_CHMAP);
527 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
528 case 0:
529 default:
530 numchan = 1;
531 break;
532 case 1:
533 numchan = 2;
534 break;
535 case 2:
536 numchan = 4;
537 break;
538 case 3:
539 numchan = 8;
540 break;
541 case 4:
542 numchan = 3;
543 break;
544 case 5:
545 numchan = 6;
546 break;
547 case 6:
548 numchan = 10;
549 break;
550 case 7:
551 numchan = 12;
552 break;
553 case 8:
554 numchan = 16;
555 break;
556 }
557 adev->gmc.vram_width = numchan * chansize;
558 }
559 /* VRAM size in MB */
560 tmp = RREG32(mmCONFIG_MEMSIZE);
561 /* some boards may have garbage in the upper 16 bits */
562 if (tmp & 0xffff0000) {
563 drm_info(adev_to_drm(adev), "Probably bad vram size: 0x%08x\n", tmp);
564 if (tmp & 0xffff)
565 tmp &= 0xffff;
566 }
567 adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
568 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
569
570 if (!(adev->flags & AMD_IS_APU)) {
571 r = amdgpu_device_resize_fb_bar(adev);
572 if (r)
573 return r;
574 }
575 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
576 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
577
578 #ifdef CONFIG_X86_64
579 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
580 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
581 adev->gmc.aper_size = adev->gmc.real_vram_size;
582 }
583 #endif
584
585 adev->gmc.visible_vram_size = adev->gmc.aper_size;
586
587 /* set the gart size */
588 if (amdgpu_gart_size == -1) {
589 switch (adev->asic_type) {
590 case CHIP_POLARIS10: /* all engines support GPUVM */
591 case CHIP_POLARIS11: /* all engines support GPUVM */
592 case CHIP_POLARIS12: /* all engines support GPUVM */
593 case CHIP_VEGAM: /* all engines support GPUVM */
594 default:
595 adev->gmc.gart_size = 256ULL << 20;
596 break;
597 case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
598 case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
599 case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
600 case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
601 adev->gmc.gart_size = 1024ULL << 20;
602 break;
603 }
604 } else {
605 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
606 }
607
608 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
609 gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
610
611 return 0;
612 }
613
614 /**
615 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
616 *
617 * @adev: amdgpu_device pointer
618 * @pasid: pasid to be flushed
619 * @flush_type: type of flush
620 * @all_hub: flush all hubs
621 * @inst: selects which instance of KIQ to use for the invalidation
622 *
623 * Flush the TLB for the requested pasid.
624 */
625 static void gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
626 uint16_t pasid, uint32_t flush_type,
627 bool all_hub, uint32_t inst)
628 {
629 u32 mask = 0x0;
630 int vmid;
631
632 for (vmid = 1; vmid < 16; vmid++) {
633 u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
634
635 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
636 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid)
637 mask |= 1 << vmid;
638 }
639
640 WREG32(mmVM_INVALIDATE_REQUEST, mask);
641 RREG32(mmVM_INVALIDATE_RESPONSE);
642 }
643
644 /*
645 * GART
646 * VMID 0 is the physical GPU addresses as used by the kernel.
647 * VMIDs 1-15 are used for userspace clients and are handled
648 * by the amdgpu vm/hsa code.
649 */
650
651 /**
652 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
653 *
654 * @adev: amdgpu_device pointer
655 * @vmid: vm instance to flush
656 * @vmhub: which hub to flush
657 * @flush_type: type of flush
658 *
659 * Flush the TLB for the requested page table (VI).
660 */
661 static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
662 uint32_t vmhub, uint32_t flush_type)
663 {
664 /* bits 0-15 are the VM contexts0-15 */
665 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
666 }
667
668 static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
669 unsigned int vmid, uint64_t pd_addr)
670 {
671 uint32_t reg;
672
673 if (vmid < 8)
674 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
675 else
676 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
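/* the page-table base registers take the page directory address in
 * 4KB units, hence pd_addr >> 12
 */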
677 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
678
679 /* bits 0-15 are the VM contexts0-15 */
680 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
681
682 return pd_addr;
683 }
684
685 static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
686 unsigned int pasid)
687 {
688 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
689 }
690
691 /*
692 * PTE format on VI:
693 * 63:40 reserved
694 * 39:12 4k physical page base address
695 * 11:7 fragment
696 * 6 write
697 * 5 read
698 * 4 exe
699 * 3 reserved
700 * 2 snooped
701 * 1 system
702 * 0 valid
703 *
704 * PDE format on VI:
705 * 63:59 block fragment size
706 * 58:40 reserved
707 * 39:1 physical base address of PTE
708 * bits 5:1 must be 0.
709 * 0 valid
710 */
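/*
 * For example (illustrative only): a valid, snooped, readable and
 * writeable system page at physical address 0x12345000 would encode as
 *   (0x12345000 & 0x000000FFFFFFF000ULL) | AMDGPU_PTE_SYSTEM |
 *   AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *   AMDGPU_PTE_VALID
 * i.e. the page base in bits 39:12 plus the attribute bits above.
 */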
711
712 static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
713 uint64_t *addr, uint64_t *flags)
714 {
715 BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
716 }
717
718 static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
719 struct amdgpu_vm *vm,
720 struct amdgpu_bo *bo,
721 uint32_t vm_flags,
722 uint64_t *flags)
723 {
724 if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
725 *flags |= AMDGPU_PTE_EXECUTABLE;
726 else
727 *flags &= ~AMDGPU_PTE_EXECUTABLE;
728 *flags &= ~AMDGPU_PTE_PRT;
729 }
730
731 /**
732 * gmc_v8_0_set_fault_enable_default - update VM fault handling
733 *
734 * @adev: amdgpu_device pointer
735 * @value: true redirects VM faults to the default page
736 */
737 static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
738 bool value)
739 {
740 u32 tmp;
741
742 tmp = RREG32(mmVM_CONTEXT1_CNTL);
743 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
744 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
745 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
746 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
747 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
748 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
749 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
750 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
751 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
752 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
753 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
754 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
755 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
756 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
757 WREG32(mmVM_CONTEXT1_CNTL, tmp);
758 }
759
760 /**
761 * gmc_v8_0_set_prt() - set PRT VM fault
762 *
763 * @adev: amdgpu_device pointer
764 * @enable: enable/disable VM fault handling for PRT
765 */
766 static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
767 {
768 u32 tmp;
769
770 if (enable && !adev->gmc.prt_warning) {
771 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
772 adev->gmc.prt_warning = true;
773 }
774
775 tmp = RREG32(mmVM_PRT_CNTL);
776 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
777 CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
778 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
779 CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
780 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
781 TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
782 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
783 TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
784 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
785 L2_CACHE_STORE_INVALID_ENTRIES, enable);
786 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
787 L1_TLB_STORE_INVALID_ENTRIES, enable);
788 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
789 MASK_PDE0_FAULT, enable);
790 WREG32(mmVM_PRT_CNTL, tmp);
791
792 if (enable) {
793 uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
794 AMDGPU_GPU_PAGE_SHIFT;
795 uint32_t high = adev->vm_manager.max_pfn -
796 (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
797
798 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
799 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
800 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
801 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
802 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
803 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
804 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
805 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
806 } else {
807 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
808 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
809 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
810 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
811 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
812 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
813 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
814 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
815 }
816 }
817
818 /**
819 * gmc_v8_0_gart_enable - gart enable
820 *
821 * @adev: amdgpu_device pointer
822 *
823 * This sets up the TLBs, programs the page tables for VMID0,
824 * sets up the hw for VMIDs 1-15 which are allocated on
825 * demand, and sets up the global locations for the LDS, GDS,
826 * and GPUVM for FSA64 clients (VI).
827 * Returns 0 for success, errors for failure.
828 */
829 static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
830 {
831 uint64_t table_addr;
832 u32 tmp, field;
833 int i;
834
835 if (adev->gart.bo == NULL) {
836 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
837 return -EINVAL;
838 }
839 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
840 table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
841
842 /* Setup TLB control */
843 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
844 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
845 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
846 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
847 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
848 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
849 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
850 /* Setup L2 cache */
851 tmp = RREG32(mmVM_L2_CNTL);
852 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
853 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
854 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
855 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
856 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
857 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
858 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
859 WREG32(mmVM_L2_CNTL, tmp);
860 tmp = RREG32(mmVM_L2_CNTL2);
861 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
862 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
863 WREG32(mmVM_L2_CNTL2, tmp);
864
865 field = adev->vm_manager.fragment_size;
866 tmp = RREG32(mmVM_L2_CNTL3);
867 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
868 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
869 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
870 WREG32(mmVM_L2_CNTL3, tmp);
871 /* XXX: set to enable PTE/PDE in system memory */
872 tmp = RREG32(mmVM_L2_CNTL4);
873 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
874 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
875 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
876 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
877 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
878 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
879 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
880 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
881 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
882 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
883 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
884 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
885 WREG32(mmVM_L2_CNTL4, tmp);
886 /* setup context0 */
887 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
888 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
889 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
890 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
891 (u32)(adev->dummy_page_addr >> 12));
892 WREG32(mmVM_CONTEXT0_CNTL2, 0);
893 tmp = RREG32(mmVM_CONTEXT0_CNTL);
894 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
895 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
896 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
897 WREG32(mmVM_CONTEXT0_CNTL, tmp);
898
899 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
900 WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
901 WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);
902
903 /* empty context1-15 */
904 /* FIXME: start with 4G; once 2-level page tables are in use,
905 * switch to the full vm size space
906 */
907 /* set vm size, must be a multiple of 4 */
908 WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
909 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
910 for (i = 1; i < AMDGPU_NUM_VMID; i++) {
911 if (i < 8)
912 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
913 table_addr >> 12);
914 else
915 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
916 table_addr >> 12);
917 }
918
919 /* enable context1-15 */
920 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
921 (u32)(adev->dummy_page_addr >> 12));
922 WREG32(mmVM_CONTEXT1_CNTL2, 4);
923 tmp = RREG32(mmVM_CONTEXT1_CNTL);
924 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
925 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
926 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
927 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
928 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
929 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
930 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
931 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
932 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
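/* PAGE_TABLE_BLOCK_SIZE is block_size (log2 of the 4KB pages per
 * page-table block) biased by -9, so a 512-page block programs 0
 */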
933 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
934 adev->vm_manager.block_size - 9);
935 WREG32(mmVM_CONTEXT1_CNTL, tmp);
936 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
937 gmc_v8_0_set_fault_enable_default(adev, false);
938 else
939 gmc_v8_0_set_fault_enable_default(adev, true);
940
941 gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
942 drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
943 (unsigned int)(adev->gmc.gart_size >> 20),
944 (unsigned long long)table_addr);
945 return 0;
946 }
947
948 static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
949 {
950 int r;
951
952 if (adev->gart.bo) {
953 WARN(1, "PCIE GART already initialized\n");
954 return 0;
955 }
956 /* Initialize common gart structure */
957 r = amdgpu_gart_init(adev);
958 if (r)
959 return r;
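/* each GART entry is one 64-bit PTE, i.e. 8 bytes per GPU page */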
960 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
961 adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
962 return amdgpu_gart_table_vram_alloc(adev);
963 }
964
965 /**
966 * gmc_v8_0_gart_disable - gart disable
967 *
968 * @adev: amdgpu_device pointer
969 *
970 * This disables all VM page tables (VI).
971 */
972 static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
973 {
974 u32 tmp;
975
976 /* Disable all tables */
977 WREG32(mmVM_CONTEXT0_CNTL, 0);
978 WREG32(mmVM_CONTEXT1_CNTL, 0);
979 /* Setup TLB control */
980 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
981 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
982 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
983 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
984 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
985 /* Setup L2 cache */
986 tmp = RREG32(mmVM_L2_CNTL);
987 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
988 WREG32(mmVM_L2_CNTL, tmp);
989 WREG32(mmVM_L2_CNTL2, 0);
990 }
991
992 /**
993 * gmc_v8_0_vm_decode_fault - print human readable fault info
994 *
995 * @adev: amdgpu_device pointer
996 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
997 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
998 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
999 * @pasid: debug logging only - no functional use
1000 *
1001 * Print human readable fault information (VI).
1002 */
1003 static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
1004 u32 addr, u32 mc_client, unsigned int pasid)
1005 {
1006 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
1007 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1008 PROTECTIONS);
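/* mc_client packs a four-character ASCII client tag, high byte first */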
1009 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
1010 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
1011 u32 mc_id;
1012
1013 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1014 MEMORY_CLIENT_ID);
1015
1016 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
1017 protections, vmid, pasid, addr,
1018 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1019 MEMORY_CLIENT_RW) ?
1020 "write" : "read", block, mc_client, mc_id);
1021 }
1022
1023 static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
1024 {
1025 switch (mc_seq_vram_type) {
1026 case MC_SEQ_MISC0__MT__GDDR1:
1027 return AMDGPU_VRAM_TYPE_GDDR1;
1028 case MC_SEQ_MISC0__MT__DDR2:
1029 return AMDGPU_VRAM_TYPE_DDR2;
1030 case MC_SEQ_MISC0__MT__GDDR3:
1031 return AMDGPU_VRAM_TYPE_GDDR3;
1032 case MC_SEQ_MISC0__MT__GDDR4:
1033 return AMDGPU_VRAM_TYPE_GDDR4;
1034 case MC_SEQ_MISC0__MT__GDDR5:
1035 return AMDGPU_VRAM_TYPE_GDDR5;
1036 case MC_SEQ_MISC0__MT__HBM:
1037 return AMDGPU_VRAM_TYPE_HBM;
1038 case MC_SEQ_MISC0__MT__DDR3:
1039 return AMDGPU_VRAM_TYPE_DDR3;
1040 default:
1041 return AMDGPU_VRAM_TYPE_UNKNOWN;
1042 }
1043 }
1044
1045 static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block)
1046 {
1047 struct amdgpu_device *adev = ip_block->adev;
1048
1049 gmc_v8_0_set_gmc_funcs(adev);
1050 gmc_v8_0_set_irq_funcs(adev);
1051
1052 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1053 adev->gmc.shared_aperture_end =
1054 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1055 adev->gmc.private_aperture_start =
1056 adev->gmc.shared_aperture_end + 1;
1057 adev->gmc.private_aperture_end =
1058 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1059 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
1060
1061 return 0;
1062 }
1063
1064 static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block)
1065 {
1066 struct amdgpu_device *adev = ip_block->adev;
1067
1068 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
1069 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1070 else
1071 return 0;
1072 }
1073
1074 static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
1075 {
1076 u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
1077 unsigned int size;
1078
1079 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1080 size = AMDGPU_VBIOS_VGA_ALLOCATION;
1081 } else {
1082 u32 viewport = RREG32(mmVIEWPORT_SIZE);
1083
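/* active scanout area times 4 bytes per pixel (assumes 32bpp) */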
1084 size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1085 REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1086 4);
1087 }
1088
1089 return size;
1090 }
1091
1092 #define mmMC_SEQ_MISC0_FIJI 0xA71
1093
1094 static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
1095 {
1096 int r;
1097 struct amdgpu_device *adev = ip_block->adev;
1098
1099 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1100
1101 if (adev->flags & AMD_IS_APU) {
1102 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
1103 } else {
1104 u32 tmp;
1105
1106 if ((adev->asic_type == CHIP_FIJI) ||
1107 (adev->asic_type == CHIP_VEGAM))
1108 tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
1109 else
1110 tmp = RREG32(mmMC_SEQ_MISC0);
1111 tmp &= MC_SEQ_MISC0__MT__MASK;
1112 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
1113 }
1114
1115 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
1116 if (r)
1117 return r;
1118
1119 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
1120 if (r)
1121 return r;
1122
1123 /* Adjust VM size here.
1124 * The minimum VM size is 64GB.
1125 * Max GPUVM address space size on VI is 40 bits.
1126 */
1127 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
1128
1129 /* Set the internal MC address mask
1130 * This is the max address of the GPU's
1131 * internal address space.
1132 */
1133 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1134
1135 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
1136 if (r) {
1137 pr_warn("No suitable DMA available\n");
1138 return r;
1139 }
1140 adev->need_swiotlb = drm_need_swiotlb(40);
1141
1142 r = gmc_v8_0_init_microcode(adev);
1143 if (r) {
1144 DRM_ERROR("Failed to load mc firmware!\n");
1145 return r;
1146 }
1147
1148 r = gmc_v8_0_mc_init(adev);
1149 if (r)
1150 return r;
1151
1152 amdgpu_gmc_get_vbios_allocations(adev);
1153
1154 /* Memory manager */
1155 r = amdgpu_bo_init(adev);
1156 if (r)
1157 return r;
1158
1159 r = gmc_v8_0_gart_init(adev);
1160 if (r)
1161 return r;
1162
1163 /*
1164 * number of VMs
1165 * VMID 0 is reserved for System
1166 * amdgpu graphics/compute will use VMIDs 1-7
1167 * amdkfd will use VMIDs 8-15
1168 */
1169 adev->vm_manager.first_kfd_vmid = 8;
1170 amdgpu_vm_manager_init(adev);
1171
1172 /* base offset of vram pages */
1173 if (adev->flags & AMD_IS_APU) {
1174 u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
1175
1176 tmp <<= 22;
1177 adev->vm_manager.vram_base_offset = tmp;
1178 } else {
1179 adev->vm_manager.vram_base_offset = 0;
1180 }
1181
1182 adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info);
1183 if (!adev->gmc.vm_fault_info)
1184 return -ENOMEM;
1185 atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
1186
1187 return 0;
1188 }
1189
1190 static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
1191 {
1192 struct amdgpu_device *adev = ip_block->adev;
1193
1194 amdgpu_gem_force_release(adev);
1195 amdgpu_vm_manager_fini(adev);
1196 kfree(adev->gmc.vm_fault_info);
1197 amdgpu_gart_table_vram_free(adev);
1198 amdgpu_bo_fini(adev);
1199 amdgpu_ucode_release(&adev->gmc.fw);
1200
1201 return 0;
1202 }
1203
1204 static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
1205 {
1206 int r;
1207 struct amdgpu_device *adev = ip_block->adev;
1208
1209 gmc_v8_0_init_golden_registers(adev);
1210
1211 gmc_v8_0_mc_program(adev);
1212
1213 if (adev->asic_type == CHIP_TONGA) {
1214 r = gmc_v8_0_tonga_mc_load_microcode(adev);
1215 if (r) {
1216 DRM_ERROR("Failed to load MC firmware!\n");
1217 return r;
1218 }
1219 } else if (adev->asic_type == CHIP_POLARIS11 ||
1220 adev->asic_type == CHIP_POLARIS10 ||
1221 adev->asic_type == CHIP_POLARIS12) {
1222 r = gmc_v8_0_polaris_mc_load_microcode(adev);
1223 if (r) {
1224 DRM_ERROR("Failed to load MC firmware!\n");
1225 return r;
1226 }
1227 }
1228
1229 r = gmc_v8_0_gart_enable(adev);
1230 if (r)
1231 return r;
1232
1233 if (amdgpu_emu_mode == 1)
1234 return amdgpu_gmc_vram_checking(adev);
1235
1236 return 0;
1237 }
1238
1239 static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
1240 {
1241 struct amdgpu_device *adev = ip_block->adev;
1242
1243 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1244 gmc_v8_0_gart_disable(adev);
1245
1246 return 0;
1247 }
1248
1249 static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block)
1250 {
1251 gmc_v8_0_hw_fini(ip_block);
1252
1253 return 0;
1254 }
1255
1256 static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
1257 {
1258 int r;
1259
1260 r = gmc_v8_0_hw_init(ip_block);
1261 if (r)
1262 return r;
1263
1264 amdgpu_vmid_reset_all(ip_block->adev);
1265
1266 return 0;
1267 }
1268
1269 static bool gmc_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
1270 {
1271 struct amdgpu_device *adev = ip_block->adev;
1272 u32 tmp = RREG32(mmSRBM_STATUS);
1273
1274 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1275 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1276 return false;
1277
1278 return true;
1279 }
1280
1281 static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1282 {
1283 unsigned int i;
1284 u32 tmp;
1285 struct amdgpu_device *adev = ip_block->adev;
1286
1287 for (i = 0; i < adev->usec_timeout; i++) {
1288 /* poll the MC busy bits in SRBM_STATUS */
1289 tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
1290 SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1291 SRBM_STATUS__MCC_BUSY_MASK |
1292 SRBM_STATUS__MCD_BUSY_MASK |
1293 SRBM_STATUS__VMC_BUSY_MASK |
1294 SRBM_STATUS__VMC1_BUSY_MASK);
1295 if (!tmp)
1296 return 0;
1297 udelay(1);
1298 }
1299 return -ETIMEDOUT;
1300
1301 }
1302
1303 static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
1304 {
1305 u32 srbm_soft_reset = 0;
1306 struct amdgpu_device *adev = ip_block->adev;
1307 u32 tmp = RREG32(mmSRBM_STATUS);
1308
1309 if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1310 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1311 SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1312
1313 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1314 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1315 if (!(adev->flags & AMD_IS_APU))
1316 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1317 SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1318 }
1319
1320 if (srbm_soft_reset) {
1321 adev->gmc.srbm_soft_reset = srbm_soft_reset;
1322 return true;
1323 }
1324
1325 adev->gmc.srbm_soft_reset = 0;
1326
1327 return false;
1328 }
1329
1330 static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
1331 {
1332 struct amdgpu_device *adev = ip_block->adev;
1333
1334 if (!adev->gmc.srbm_soft_reset)
1335 return 0;
1336
1337 gmc_v8_0_mc_stop(adev);
1338 if (gmc_v8_0_wait_for_idle(ip_block))
1339 dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1340
1341 return 0;
1342 }
1343
1344 static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
1345 {
1346 struct amdgpu_device *adev = ip_block->adev;
1347 u32 srbm_soft_reset;
1348
1349 if (!adev->gmc.srbm_soft_reset)
1350 return 0;
1351 srbm_soft_reset = adev->gmc.srbm_soft_reset;
1352
1353 if (srbm_soft_reset) {
1354 u32 tmp;
1355
1356 tmp = RREG32(mmSRBM_SOFT_RESET);
1357 tmp |= srbm_soft_reset;
1358 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1359 WREG32(mmSRBM_SOFT_RESET, tmp);
1360 tmp = RREG32(mmSRBM_SOFT_RESET);
1361
1362 udelay(50);
1363
1364 tmp &= ~srbm_soft_reset;
1365 WREG32(mmSRBM_SOFT_RESET, tmp);
1366 tmp = RREG32(mmSRBM_SOFT_RESET);
1367
1368 /* Wait a little for things to settle down */
1369 udelay(50);
1370 }
1371
1372 return 0;
1373 }
1374
1375 static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
1376 {
1377 struct amdgpu_device *adev = ip_block->adev;
1378
1379 if (!adev->gmc.srbm_soft_reset)
1380 return 0;
1381
1382 gmc_v8_0_mc_resume(adev);
1383 return 0;
1384 }
1385
1386 static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1387 struct amdgpu_irq_src *src,
1388 unsigned int type,
1389 enum amdgpu_interrupt_state state)
1390 {
1391 u32 tmp;
1392 u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1393 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1394 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1395 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1396 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1397 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1398 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1399
1400 switch (state) {
1401 case AMDGPU_IRQ_STATE_DISABLE:
1402 /* system context */
1403 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1404 tmp &= ~bits;
1405 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1406 /* VMs */
1407 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1408 tmp &= ~bits;
1409 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1410 break;
1411 case AMDGPU_IRQ_STATE_ENABLE:
1412 /* system context */
1413 tmp = RREG32(mmVM_CONTEXT0_CNTL);
1414 tmp |= bits;
1415 WREG32(mmVM_CONTEXT0_CNTL, tmp);
1416 /* VMs */
1417 tmp = RREG32(mmVM_CONTEXT1_CNTL);
1418 tmp |= bits;
1419 WREG32(mmVM_CONTEXT1_CNTL, tmp);
1420 break;
1421 default:
1422 break;
1423 }
1424
1425 return 0;
1426 }
1427
1428 static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1429 struct amdgpu_irq_src *source,
1430 struct amdgpu_iv_entry *entry)
1431 {
1432 u32 addr, status, mc_client, vmid;
1433
1434 if (amdgpu_sriov_vf(adev)) {
1435 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1436 entry->src_id, entry->src_data[0]);
1437 dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
1438 return 0;
1439 }
1440
1441 /* Delegate to the soft IRQ handler ring */
1442 if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
1443 amdgpu_irq_delegate(adev, entry, 4);
1444 return 1;
1445 }
1446
1447 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1448 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1449 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1450 /* reset addr and status */
1451 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1452
1453 if (!addr && !status)
1454 return 0;
1455
1456 amdgpu_vm_update_fault_cache(adev, entry->pasid,
1457 ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0));
1458
1459 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1460 gmc_v8_0_set_fault_enable_default(adev, false);
1461
1462 if (printk_ratelimit()) {
1463 struct amdgpu_task_info *task_info;
1464
1465 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1466 entry->src_id, entry->src_data[0]);
1467
1468 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
1469 if (task_info) {
1470 amdgpu_vm_print_task_info(adev, task_info);
1471 amdgpu_vm_put_task_info(task_info);
1472 }
1473
1474 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1475 addr);
1476 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1477 status);
1478
1479 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
1480 entry->pasid);
1481 }
1482
1483 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1484 VMID);
1485 if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
1486 && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
1487 struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
1488 u32 protections = REG_GET_FIELD(status,
1489 VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1490 PROTECTIONS);
1491
1492 info->vmid = vmid;
1493 info->mc_id = REG_GET_FIELD(status,
1494 VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1495 MEMORY_CLIENT_ID);
1496 info->status = status;
1497 info->page_addr = addr;
1498 info->prot_valid = protections & 0x7 ? true : false;
1499 info->prot_read = protections & 0x8 ? true : false;
1500 info->prot_write = protections & 0x10 ? true : false;
1501 info->prot_exec = protections & 0x20 ? true : false;
1502 atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
1503 }
1504
1505 return 0;
1506 }
1507
1508 static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
1509 bool enable)
1510 {
1511 uint32_t data;
1512
1513 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
1514 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1515 data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1516 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1517
1518 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1519 data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1520 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1521
1522 data = RREG32(mmMC_HUB_MISC_VM_CG);
1523 data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
1524 WREG32(mmMC_HUB_MISC_VM_CG, data);
1525
1526 data = RREG32(mmMC_XPB_CLK_GAT);
1527 data |= MC_XPB_CLK_GAT__ENABLE_MASK;
1528 WREG32(mmMC_XPB_CLK_GAT, data);
1529
1530 data = RREG32(mmATC_MISC_CG);
1531 data |= ATC_MISC_CG__ENABLE_MASK;
1532 WREG32(mmATC_MISC_CG, data);
1533
1534 data = RREG32(mmMC_CITF_MISC_WR_CG);
1535 data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
1536 WREG32(mmMC_CITF_MISC_WR_CG, data);
1537
1538 data = RREG32(mmMC_CITF_MISC_RD_CG);
1539 data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
1540 WREG32(mmMC_CITF_MISC_RD_CG, data);
1541
1542 data = RREG32(mmMC_CITF_MISC_VM_CG);
1543 data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
1544 WREG32(mmMC_CITF_MISC_VM_CG, data);
1545
1546 data = RREG32(mmVM_L2_CG);
1547 data |= VM_L2_CG__ENABLE_MASK;
1548 WREG32(mmVM_L2_CG, data);
1549 } else {
1550 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1551 data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
1552 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1553
1554 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1555 data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
1556 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1557
1558 data = RREG32(mmMC_HUB_MISC_VM_CG);
1559 data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
1560 WREG32(mmMC_HUB_MISC_VM_CG, data);
1561
1562 data = RREG32(mmMC_XPB_CLK_GAT);
1563 data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
1564 WREG32(mmMC_XPB_CLK_GAT, data);
1565
1566 data = RREG32(mmATC_MISC_CG);
1567 data &= ~ATC_MISC_CG__ENABLE_MASK;
1568 WREG32(mmATC_MISC_CG, data);
1569
1570 data = RREG32(mmMC_CITF_MISC_WR_CG);
1571 data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
1572 WREG32(mmMC_CITF_MISC_WR_CG, data);
1573
1574 data = RREG32(mmMC_CITF_MISC_RD_CG);
1575 data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
1576 WREG32(mmMC_CITF_MISC_RD_CG, data);
1577
1578 data = RREG32(mmMC_CITF_MISC_VM_CG);
1579 data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
1580 WREG32(mmMC_CITF_MISC_VM_CG, data);
1581
1582 data = RREG32(mmVM_L2_CG);
1583 data &= ~VM_L2_CG__ENABLE_MASK;
1584 WREG32(mmVM_L2_CG, data);
1585 }
1586 }
1587
1588 static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
1589 bool enable)
1590 {
1591 uint32_t data;
1592
1593 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
1594 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1595 data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1596 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1597
1598 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1599 data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1600 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1601
1602 data = RREG32(mmMC_HUB_MISC_VM_CG);
1603 data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1604 WREG32(mmMC_HUB_MISC_VM_CG, data);
1605
1606 data = RREG32(mmMC_XPB_CLK_GAT);
1607 data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1608 WREG32(mmMC_XPB_CLK_GAT, data);
1609
1610 data = RREG32(mmATC_MISC_CG);
1611 data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1612 WREG32(mmATC_MISC_CG, data);
1613
1614 data = RREG32(mmMC_CITF_MISC_WR_CG);
1615 data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1616 WREG32(mmMC_CITF_MISC_WR_CG, data);
1617
1618 data = RREG32(mmMC_CITF_MISC_RD_CG);
1619 data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1620 WREG32(mmMC_CITF_MISC_RD_CG, data);
1621
1622 data = RREG32(mmMC_CITF_MISC_VM_CG);
1623 data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1624 WREG32(mmMC_CITF_MISC_VM_CG, data);
1625
1626 data = RREG32(mmVM_L2_CG);
1627 data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
1628 WREG32(mmVM_L2_CG, data);
1629 } else {
1630 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1631 data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
1632 WREG32(mmMC_HUB_MISC_HUB_CG, data);
1633
1634 data = RREG32(mmMC_HUB_MISC_SIP_CG);
1635 data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
1636 WREG32(mmMC_HUB_MISC_SIP_CG, data);
1637
1638 data = RREG32(mmMC_HUB_MISC_VM_CG);
1639 data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1640 WREG32(mmMC_HUB_MISC_VM_CG, data);
1641
1642 data = RREG32(mmMC_XPB_CLK_GAT);
1643 data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
1644 WREG32(mmMC_XPB_CLK_GAT, data);
1645
1646 data = RREG32(mmATC_MISC_CG);
1647 data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
1648 WREG32(mmATC_MISC_CG, data);
1649
1650 data = RREG32(mmMC_CITF_MISC_WR_CG);
1651 data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
1652 WREG32(mmMC_CITF_MISC_WR_CG, data);
1653
1654 data = RREG32(mmMC_CITF_MISC_RD_CG);
1655 data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
1656 WREG32(mmMC_CITF_MISC_RD_CG, data);
1657
1658 data = RREG32(mmMC_CITF_MISC_VM_CG);
1659 data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
1660 WREG32(mmMC_CITF_MISC_VM_CG, data);
1661
1662 data = RREG32(mmVM_L2_CG);
1663 data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
1664 WREG32(mmVM_L2_CG, data);
1665 }
1666 }
1667
1668 static int gmc_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1669 enum amd_clockgating_state state)
1670 {
1671 struct amdgpu_device *adev = ip_block->adev;
1672
1673 if (amdgpu_sriov_vf(adev))
1674 return 0;
1675
1676 switch (adev->asic_type) {
1677 case CHIP_FIJI:
1678 fiji_update_mc_medium_grain_clock_gating(adev,
1679 state == AMD_CG_STATE_GATE);
1680 fiji_update_mc_light_sleep(adev,
1681 state == AMD_CG_STATE_GATE);
1682 break;
1683 default:
1684 break;
1685 }
1686 return 0;
1687 }
1688
1689 static int gmc_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
1690 enum amd_powergating_state state)
1691 {
1692 return 0;
1693 }
1694
1695 static void gmc_v8_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
1696 {
1697 struct amdgpu_device *adev = ip_block->adev;
1698 int data;
1699
1700 if (amdgpu_sriov_vf(adev))
1701 *flags = 0;
1702
1703 /* AMD_CG_SUPPORT_MC_MGCG */
1704 data = RREG32(mmMC_HUB_MISC_HUB_CG);
1705 if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
1706 *flags |= AMD_CG_SUPPORT_MC_MGCG;
1707
1708 /* AMD_CG_SUPPORT_MC_LS */
1709 if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
1710 *flags |= AMD_CG_SUPPORT_MC_LS;
1711 }
1712
1713 static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
1714 .name = "gmc_v8_0",
1715 .early_init = gmc_v8_0_early_init,
1716 .late_init = gmc_v8_0_late_init,
1717 .sw_init = gmc_v8_0_sw_init,
1718 .sw_fini = gmc_v8_0_sw_fini,
1719 .hw_init = gmc_v8_0_hw_init,
1720 .hw_fini = gmc_v8_0_hw_fini,
1721 .suspend = gmc_v8_0_suspend,
1722 .resume = gmc_v8_0_resume,
1723 .is_idle = gmc_v8_0_is_idle,
1724 .wait_for_idle = gmc_v8_0_wait_for_idle,
1725 .check_soft_reset = gmc_v8_0_check_soft_reset,
1726 .pre_soft_reset = gmc_v8_0_pre_soft_reset,
1727 .soft_reset = gmc_v8_0_soft_reset,
1728 .post_soft_reset = gmc_v8_0_post_soft_reset,
1729 .set_clockgating_state = gmc_v8_0_set_clockgating_state,
1730 .set_powergating_state = gmc_v8_0_set_powergating_state,
1731 .get_clockgating_state = gmc_v8_0_get_clockgating_state,
1732 };
1733
1734 static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
1735 .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
1736 .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
1737 .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
1738 .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
1739 .set_prt = gmc_v8_0_set_prt,
1740 .get_vm_pde = gmc_v8_0_get_vm_pde,
1741 .get_vm_pte = gmc_v8_0_get_vm_pte,
1742 .get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
1743 };
1744
1745 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
1746 .set = gmc_v8_0_vm_fault_interrupt_state,
1747 .process = gmc_v8_0_process_interrupt,
1748 };
1749
1750 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
1751 {
1752 adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
1753 }
1754
1755 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
1756 {
1757 adev->gmc.vm_fault.num_types = 1;
1758 adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
1759 }
1760
1761 const struct amdgpu_ip_block_version gmc_v8_0_ip_block = {
1762 .type = AMD_IP_BLOCK_TYPE_GMC,
1763 .major = 8,
1764 .minor = 0,
1765 .rev = 0,
1766 .funcs = &gmc_v8_0_ip_funcs,
1767 };
1768
1769 const struct amdgpu_ip_block_version gmc_v8_1_ip_block = {
1770 .type = AMD_IP_BLOCK_TYPE_GMC,
1771 .major = 8,
1772 .minor = 1,
1773 .rev = 0,
1774 .funcs = &gmc_v8_0_ip_funcs,
1775 };
1776
1777 const struct amdgpu_ip_block_version gmc_v8_5_ip_block = {
1778 .type = AMD_IP_BLOCK_TYPE_GMC,
1779 .major = 8,
1780 .minor = 5,
1781 .rev = 0,
1782 .funcs = &gmc_v8_0_ip_funcs,
1783 };
1784