/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_8.h"

#include "mmhub/mmhub_1_8_0_offset.h"
#include "mmhub/mmhub_1_8_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"
#include "amdgpu_psp.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

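/* Read the FB aperture programmed into MMHUB instance 0 and cache it in
 * adev->gmc. FB_BASE/FB_TOP hold 16 MB granular segment numbers, hence
 * the shift by 24 to convert them to byte addresses.
 */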
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

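/* Program the page table base address of @vmid into VM_CONTEXT<vmid> on
 * every MMHUB instance present in the AID mask.
 */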
static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

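/* Set up the VMID0 page table and its aperture range on each MMHUB
 * instance. When a PDB0 BO exists, GART also translates the framebuffer,
 * so the aperture starts at the FB (or migrated VRAM) base rather than
 * at gart_start.
 */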
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
			adev->gmc.vram_start : adev->gmc.fb_start;
	uint64_t pt_base;
	u32 inst_mask;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);

	/* If GART is used for FB translation, the VMID0 page table covers
	 * both VRAM and system memory (GART).
	 */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(gart_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(gart_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));

		} else {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

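/* Program the AGP and system apertures, the default page for unmapped
 * requests and the protection fault default address on each MMHUB
 * instance; none of this is touched on SR-IOV VFs.
 */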
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	uint64_t value;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
			     adev->gmc.agp_start >> 24);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
			     adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* In the case of squeezing VRAM into the GART aperture, the
		 * FB aperture and AGP aperture are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
				     0x00FFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				     0x3FFFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}

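/* Configure the MC L1 TLB. When L1 TLB control is indirect under SR-IOV,
 * a single control value is handed to the PSP for programming; otherwise
 * MC_VM_MX_L1_TLB_CNTL is written directly on each MMHUB instance.
 */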
static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
		tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC); /* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
	} else {
		inst_mask = adev->aid_mask;
		for_each_inst(i, inst_mask) {
			tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);

			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
					    1);
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
					    SYSTEM_ACCESS_MODE, 3);
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
					    ENABLE_ADVANCED_DRIVER_MODEL, 1);
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
					    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
					    MTYPE, MTYPE_UC); /* XXX for emulation. */
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

			WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
		}
	}
}

/* Set the snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i, j;
	uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
			    regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		for (j = 0; j < 5; j++) { /* DAGB instances */
			tmp = RREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
			tmp |= (1 << 15); /* SDMA client is BIT15 */
			WREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);

			tmp = RREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
			tmp |= (1 << 15);
			WREG32_SOC15_OFFSET(MMHUB, i,
				regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
		}
	}
}

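/* Set up the VM L2 cache and fragment processing on each MMHUB instance.
 * Bank select and big-K fragment size depend on the translate_further
 * mode, and PDE/PTE requests are tapped physically on parts with an XGMI
 * link to the CPU and on APP APUs.
 */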
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation; refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
	}
}

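/* Enable VM context 0, the system domain, with the VMID0 page table
 * depth and block size and with retry faults disabled.
 */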
static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp,
				    VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
	}
}

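/* Disable the context1 identity aperture by programming an empty range,
 * i.e. a low address above the high address.
 */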
static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
{
	u32 inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

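/* Enable VM contexts 1-15 with all protection fault defaults set and the
 * per-process page table depth and block size; retry faults stay on so
 * that per-process XNACK can work.
 */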
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp, inst_mask;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
						  i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_DEPTH, num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* On 9.4.3, XNACK can be enabled in the SQ
			 * per-process. Retry faults need to be enabled for
			 * that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				i * hub->ctx_addr_distance,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				i * hub->ctx_addr_distance,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

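/* Point the address range of all 18 invalidation engines at the full
 * address space.
 */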
static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 i, j, inst_mask;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, j,
					regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, j,
					regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					i * hub->eng_addr_distance, 0x1f);
		}
	}
}

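/* GART bring-up sequence: apertures, TLB, L2 cache and snoop overrides
 * first, then the system domain, identity aperture, per-VMID contexts
 * and invalidation engines.
 */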
static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v1_8_init_gart_aperture_regs(adev);
	mmhub_v1_8_init_system_aperture_regs(adev);
	mmhub_v1_8_init_tlb_regs(adev);
	mmhub_v1_8_init_cache_regs(adev);
	mmhub_v1_8_init_snoop_override_regs(adev);

	mmhub_v1_8_enable_system_domain(adev);
	mmhub_v1_8_disable_identity_aperture(adev);
	mmhub_v1_8_setup_vmid_config(adev);
	mmhub_v1_8_program_invalidation(adev);

	return 0;
}

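/* Turn off the L1 TLB, through the PSP when L1 TLB control is indirect
 * under SR-IOV, otherwise with direct register writes per instance.
 */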
static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i, inst_mask;

	if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
		tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
	} else {
		inst_mask = adev->aid_mask;
		for_each_inst(i, inst_mask) {
			tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
					    0);
			tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
					    ENABLE_ADVANCED_DRIVER_MODEL, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
		}
	}
}

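/* Disable all VM contexts and the L2 cache on every MMHUB instance, then
 * shut off the L1 TLB.
 */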
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j, inst_mask;

	/* Disable all tables */
	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);
		if (!amdgpu_sriov_vf(adev)) {
			/* Setup L2 cache */
			tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
					    0);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
		}
	}

	mmhub_v1_8_disable_l1_tlb(adev);
}

/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

static void mmhub_v1_8_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
			regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	return 0;
}

static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
}

const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
	.get_fb_location = mmhub_v1_8_get_fb_location,
	.init = mmhub_v1_8_init,
	.gart_enable = mmhub_v1_8_gart_enable,
	.set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
	.gart_disable = mmhub_v1_8_gart_disable,
	.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
	.set_clockgating = mmhub_v1_8_set_clockgating,
	.get_clockgating = mmhub_v1_8_get_clockgating,
};

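/* Correctable and uncorrectable error status registers of the MMEA
 * instances and MM_CANE, plus the memory ID table shared by the RAS
 * query/reset helpers below.
 */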
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = {
	{AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"},
	{AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"},
	{AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"},
	{AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"},
	{AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"},
	{AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"},
	{AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"},
	{AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"},
	{AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"},
	{AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"},
	{AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"},
	{AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"},
	{AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"},
	{AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"},
	{AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"},
	{AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"},
	{AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"},
};

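/* Collect the CE and UE counts of one MMHUB instance and attribute them
 * to the matching socket and die.
 */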
static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst,
						  void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
	unsigned long ue_count = 0, ce_count = 0;

	/* NOTE: the MMHUB instance is derived from aid_mask and ranges
	 * from 0-3, so it can be used directly as the die ID.
	 */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = mmhub_inst,
	};

	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);
	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);

	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}

static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_err_status)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_inst);
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_inst);
}

static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_8_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};

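/* Parse one ACA bank: UEs are logged as a single error, while the CE
 * count is taken from the MISC0 error counter field.
 */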
static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				      enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* Error codes: refer to the SMU driver i/f (interface) header file */
static int mmhub_v1_8_err_codes[] = {
	0, 1, 2, 3, 4, /* CODE_DAGB0 - CODE_DAGB4 */
	5, 6, 7, 8, 9, /* CODE_EA0 - CODE_EA4 */
	10, /* CODE_UTCL2_ROUTER */
	11, /* CODE_VML2 */
	12, /* CODE_VML2_WALKER */
	13, /* CODE_MMCANE */
};

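/* A bank is accepted only if its IPID instance matches the AID SMU and
 * its error code is one of the MMHUB codes above.
 */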
static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       mmhub_v1_8_err_codes,
				       ARRAY_SIZE(mmhub_v1_8_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
	.aca_bank_parser = mmhub_v1_8_aca_bank_parser,
	.aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
};

static const struct aca_info mmhub_v1_8_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &mmhub_v1_8_aca_bank_ops,
};

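/* Common RAS block late init plus binding of the MMHUB ACA handle; the
 * late init is unwound if the bind fails.
 */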
static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB,
				&mmhub_v1_8_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_8_ras_hw_ops,
		.ras_late_init = mmhub_v1_8_ras_late_init,
	},
};