/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "amdgpu_atombios.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

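/*
 * Each golden-settings table above is a flat list of
 * (register, AND mask, OR value) triplets;
 * amdgpu_program_register_sequence() applies each entry as
 * reg = (reg & ~mask) | value, a mask of 0xffffffff meaning
 * a straight overwrite.
 */
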
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
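		/* BLACKOUT_MODE was cleared above, so the OR with 1 below
		 * writes the register back with BLACKOUT_MODE = 1,
		 * engaging the blackout
		 */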
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
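	/* the io_debug blob is a list of (index, data) register pairs,
	 * two 32-bit words per entry, hence the divide by (4 * 2)
	 */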
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
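	/* raw dword offsets: 32 blocks of five HDP registers, 0x6 dwords
	 * apart, that are apparently not named in the imported headers
	 */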
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	gmc_v7_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* CONFIG_MEMSIZE reports the size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
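		/* on APUs the carveout lives in system memory;
		 * MC_VM_FB_OFFSET is in units of 4 MB, hence the shift by 22
		 */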
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is for the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

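	/* a GART entry is one 64-bit word: the 4k-aligned page address
	 * in bits 63:12 with the access flag bits OR'd in
	 */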
	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
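		/* a LOW address above the HIGH address makes each PRT
		 * aperture an empty range, effectively disabling it
		 */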
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

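	/* raw dword offsets, apparently without names in the imported
	 * register headers
	 */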
	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
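	/* the per-VMID page table base registers are split into two banks:
	 * VMIDs 1-7 follow VM_CONTEXT0's base register, VMIDs 8-15 start
	 * at VM_CONTEXT8's
	 */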
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
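	/* one 64-bit PTE per GART page */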
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is for the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
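	/* the MCCLIENT register packs a four-character client tag,
	 * most significant byte first; unpack it into a printable string
	 */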
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};
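
/* the three arrays above are parallel: mc_cg_registers[i] is clock
 * gated via mc_cg_en[i] and put into light sleep via mc_cg_ls_en[i]
 */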

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

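	/* two adjacent 4 GB windows in the 64-bit GPUVM address space,
	 * used as the HSA shared and private apertures
	 */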
	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Default is 64 GB unless overridden on the kernel command line;
	 * max GPUVM address width on CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64);
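	/* vm_size is in GB; one GB is 1 << 18 4k pages, hence the shift */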
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
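		/* read back, presumably to post the write before the delay */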

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};