xref: /linux/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c (revision 593043d35ddff8ab033546c2a89bb1d4080d03e1)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"


static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

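/**
 * gmc_v8_0_init_golden_registers - apply golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the per-ASIC MC/VM golden register sequences
 * (clock gating init and golden settings) for the detected chip.
 */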
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_stoney_common,
						 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

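/**
 * gmc_v8_0_mc_stop - stop the memory controller for reprogramming
 *
 * @adev: amdgpu_device pointer
 * @save: saved display state
 *
 * Stop display MC access, wait for the MC to go idle, block CPU FB
 * access and put the MC into blackout mode so it can be reprogrammed
 * safely.
 */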
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

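/**
 * gmc_v8_0_mc_resume - restart the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: saved display state
 *
 * Take the MC out of blackout mode, re-enable CPU FB access and
 * resume display MC access.  Counterpart of gmc_v8_0_mc_stop().
 */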
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * On a VF the hypervisor loads the ucode for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

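/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (Polaris).
 * Returns 0 on success, error on failure.
 */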
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * On a VF the hypervisor loads the ucode for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

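/**
 * gmc_v8_0_vram_gtt_location - place VRAM and GTT in the MC address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller info structure
 *
 * Cap the VRAM size so that at least 1024M of address space is left
 * for GTT, then position the VRAM and GTT apertures in the GPU's
 * physical address space.
 */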
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on vi */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

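/**
 * gmc_v8_0_get_vm_pte_flags - convert generic mapping flags to hw PTE flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* mapping flags
 *
 * Translate the driver-generic page flags into the matching
 * AMDGPU_PTE_* bits used by the VI page tables.
 */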
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

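/**
 * gmc_v8_0_gart_init - gart table setup
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the page table
 * in VRAM (each entry is 8 bytes wide).
 * Returns 0 for success, error for failure.
 */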
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - vi vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vi specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - vi vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

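/**
 * gmc_v8_0_convert_vram_type - map the MC_SEQ_MISC0 memory type field
 *
 * @mc_seq_vram_type: memory type field read from MC_SEQ_MISC0
 *
 * Returns the corresponding AMDGPU_VRAM_TYPE_* value, or
 * AMDGPU_VRAM_TYPE_UNKNOWN if the field is not recognized.
 */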
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

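/**
 * gmc_v8_0_sw_init - driver software init
 *
 * @handle: amdgpu_device pointer
 *
 * Determine the vram type, register the VM fault interrupt sources,
 * set up the DMA masks, load the MC firmware and initialize the
 * memory manager, gart and VM manager.
 * Returns 0 for success, error for failure.
 */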
static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB ((1 << 24) 4k pages).
	 * Max GPUVM size for VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
			adev->asic_type == CHIP_POLARIS10 ||
			adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

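/**
 * gmc_v8_0_check_soft_reset - check whether the MC needs a soft reset
 *
 * @handle: amdgpu_device pointer
 *
 * Inspect SRBM_STATUS and record which SRBM soft reset bits (VMC
 * and/or MC) would be required to recover the block.
 * Returns true if a soft reset is needed.
 */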
static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->mc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->mc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev, &adev->mc.save);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->mc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->mc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev, &adev->mc.save);
	return 0;
}

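/**
 * gmc_v8_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Toggle the protection fault interrupt enable bits in the
 * VM_CONTEXT0/1_CNTL registers.
 */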
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

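/**
 * gmc_v8_0_process_interrupt - process a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Read and clear the fault address/status registers and print a
 * rate-limited, decoded description of the fault.
 */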
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}

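/**
 * fiji_update_mc_medium_grain_clock_gating - toggle MC medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable clock gating
 *
 * Set or clear the ENABLE bit in each MC hub, ATC, CITF and VM L2
 * clock gating register (Fiji).
 */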
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

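/**
 * fiji_update_mc_light_sleep - toggle MC memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable light sleep
 *
 * Set or clear the MEM_LS_ENABLE bit in each MC hub, ATC, CITF and
 * VM L2 clock gating register (Fiji).
 */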
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};