/* drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c (revision 00f85667faf03591666a3a447dc0d489ea9f0cb4) */
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_8.h"

#include "mmhub/mmhub_1_8_0_offset.h"
#include "mmhub/mmhub_1_8_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

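/* Read the FB aperture range from MC_VM_FB_LOCATION and cache it in adev->gmc */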
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

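/* Program the page table base address for @vmid on every MMHUB instance */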
static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

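/* Program the VMID0 page table base and its start/end aperture addresses */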
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base;
	u32 inst_mask;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both VRAM and system memory (GART).
	 */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));

		} else {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

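/* Program the AGP aperture, system aperture, default page and fault default address */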
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	uint64_t value;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
			     adev->gmc.agp_start >> 24);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
			     adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* When VRAM is squeezed into the GART aperture, the FB and
		 * AGP apertures are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
				     0x00FFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				     0x3FFFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}

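/* Program the L1 TLB control register on every MMHUB instance */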
static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	/* Setup TLB control */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

/* Set the snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i, j;
	uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
			    regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		for (j = 0; j < 5; j++) { /* DAGB instances */
			tmp = RREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
			tmp |= (1 << 15); /* SDMA client is BIT15 */
			WREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);

			tmp = RREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
			tmp |= (1 << 15);
			WREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
		}
	}
}

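/* Program the VM L2 cache control registers on every MMHUB instance */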
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
	}
}

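/* Enable VM context0, the system domain used for GART translations */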
static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp,
				    VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
	}
}

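/* Program an empty range so the context1 identity aperture is never hit */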
static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
{
	u32 inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

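/* Program VM contexts 1-15: page table depth, block size, fault behavior and address range */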
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp, inst_mask;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
						  i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_DEPTH, num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* On 9.4.3, XNACK can be enabled in the SQ
			 * per-process. Retry faults need to be enabled for
			 * that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				i * hub->ctx_addr_distance,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				i * hub->ctx_addr_distance,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

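/* Initialize the address ranges of all 18 invalidation engines */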
static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 i, j, inst_mask;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, j,
					regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, j,
					regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					i * hub->eng_addr_distance, 0x1f);
		}
	}
}

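/* Bring up GART translation on all MMHUB instances */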
static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v1_8_init_gart_aperture_regs(adev);
	mmhub_v1_8_init_system_aperture_regs(adev);
	mmhub_v1_8_init_tlb_regs(adev);
	mmhub_v1_8_init_cache_regs(adev);
	mmhub_v1_8_init_snoop_override_regs(adev);

	mmhub_v1_8_enable_system_domain(adev);
	mmhub_v1_8_disable_identity_aperture(adev);
	mmhub_v1_8_setup_vmid_config(adev);
	mmhub_v1_8_program_invalidation(adev);

	return 0;
}

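/* Disable all VM contexts, the L1 TLB and (on bare metal) the L2 cache */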
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j, inst_mask;

	/* Disable all tables */
	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);

		if (!amdgpu_sriov_vf(adev)) {
			/* Setup L2 cache */
			tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
					    0);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
		}
	}
}

/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

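/* Cache per-instance register offsets and distances in the vmhub structures */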
static void mmhub_v1_8_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
			regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

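/* No clockgating is programmed here; these stubs satisfy the mmhub funcs interface */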
static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	return 0;
}

static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{

}

const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
	.get_fb_location = mmhub_v1_8_get_fb_location,
	.init = mmhub_v1_8_init,
	.gart_enable = mmhub_v1_8_gart_enable,
	.set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
	.gart_disable = mmhub_v1_8_gart_disable,
	.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
	.set_clockgating = mmhub_v1_8_set_clockgating,
	.get_clockgating = mmhub_v1_8_get_clockgating,
};

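/* RAS correctable-error status registers, one entry per MMEA instance plus MM_CANE */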
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

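/* RAS uncorrectable-error status registers, mirroring the layout of the CE list */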
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

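/* Memory block IDs and names used when decoding the RAS error status */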
static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = {
	{AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"},
	{AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"},
	{AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"},
	{AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"},
	{AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"},
	{AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"},
	{AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"},
	{AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"},
	{AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"},
	{AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"},
	{AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"},
	{AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"},
	{AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"},
	{AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"},
	{AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"},
	{AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"},
	{AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"},
};

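/* Count CE and UE errors on one MMHUB instance and fold them into the RAS statistics */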
static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst,
						  void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
	unsigned long ue_count = 0, ce_count = 0;

	/* NOTE: the mmhub instance comes from aid_mask and ranges from 0-3,
	 * so it can be used as the die ID directly */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = mmhub_inst,
	};

	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);
	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);

	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}

static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_err_status)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_inst);
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_inst);
}

static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_8_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};

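/* Decode an ACA bank record and log it as a UE or CE/deferred error */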
static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				      enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* Error codes, per the SMU driver interface header file */
static int mmhub_v1_8_err_codes[] = {
	0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
	5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
	10, /* CODE_UTCL2_ROUTER */
	11, /* CODE_VML2 */
	12, /* CODE_VML2_WALKER */
	13, /* CODE_MMCANE */
};

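/* Accept only banks reported by the AID's SMU instance that carry MMHUB error codes */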
static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       mmhub_v1_8_err_codes,
				       ARRAY_SIZE(mmhub_v1_8_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
	.aca_bank_parser = mmhub_v1_8_aca_bank_parser,
	.aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
};

static const struct aca_info mmhub_v1_8_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &mmhub_v1_8_aca_bank_ops,
};

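/* Late RAS init: register the block and bind the ACA handle for MMHUB */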
static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB,
				&mmhub_v1_8_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_8_ras_hw_ops,
		.ras_late_init = mmhub_v1_8_ras_late_init,
	},
};
832