/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v4_2_0.h"

#include "mmhub/mmhub_4_2_0_offset.h"
#include "mmhub/mmhub_4_2_0_sh_mask.h"

#include "soc15_common.h"
#include "soc24_enum.h"

#define regMMVM_L2_CNTL3_DEFAULT				0x80100007
#define regMMVM_L2_CNTL4_DEFAULT				0x000000c1
#define regMMVM_L2_CNTL5_DEFAULT				0x00003fe0

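/* UTCL2 client names, indexed as [cid][rw]: column 0 names the read
 * client for a given client ID, column 1 the write client. Only used by
 * the protection fault decoder below; gaps are IDs with no named client.
 */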
static const char *mmhub_client_ids_v4_2_0[][2] = {
	[0][0] = "VMC",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[6][0] = "MP0",
	[7][0] = "MP1",
	[8][0] = "MPIO",
	[16][0] = "HDP",
	[17][0] = "LSDMA",
	[18][0] = "JPEG",
	[19][0] = "VCNU0",
	[21][0] = "VSCH",
	[22][0] = "VCNU1",
	[23][0] = "VCN1",
	[32+20][0] = "VCN0",
	[2][1] = "DBGUNBIO",
	[3][1] = "DCEDWB",
	[4][1] = "DCEDMC",
	[5][1] = "DCEVGA",
	[6][1] = "MP0",
	[7][1] = "MP1",
	[8][1] = "MPIO",
	[10][1] = "DBGU0",
	[11][1] = "DBGU1",
	[12][1] = "DBGU2",
	[13][1] = "DBGU3",
	[14][1] = "XDP",
	[15][1] = "OSSSYS",
	[16][1] = "HDP",
	[17][1] = "LSDMA",
	[18][1] = "JPEG",
	[19][1] = "VCNU0",
	[20][1] = "VCN0",
	[21][1] = "VSCH",
	[22][1] = "VCNU1",
	[23][1] = "VCN1",
};

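/* The FB base is split across a LO32/HI32 register pair: LO32 holds the
 * base in 16 MB units (hence the shift by 24), and the HI1 field of the
 * HI32 register appears to contribute a single extra address bit 56.
 */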
static u64 mmhub_v4_2_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base;

	base = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
			    regMMMC_VM_FB_LOCATION_BASE_LO32);
	base &= MMMC_VM_FB_LOCATION_BASE_LO32__FB_BASE_LO32_MASK;
	base <<= 24;

	base |= ((u64)(MMMC_VM_FB_LOCATION_BASE_HI32__FB_BASE_HI1_MASK &
		       RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				    regMMMC_VM_FB_LOCATION_BASE_HI32)) << 56);

	return base;
}

static u64 mmhub_v4_2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				 regMMMC_VM_FB_OFFSET) << 24;
}

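/* Program a VMID's page table base on every MMHUB instance selected by
 * mid_mask; the non-mid wrappers below pass adev->aid_mask.
 */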
static void mmhub_v4_2_0_mid_setup_vm_pt_regs(struct amdgpu_device *adev,
					      uint32_t vmid,
					      uint64_t page_table_base,
					      uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, i),
				    regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, i),
				    regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void mmhub_v4_2_0_setup_vm_pt_regs(struct amdgpu_device *adev,
					  uint32_t vmid,
					  uint64_t page_table_base)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_setup_vm_pt_regs(adev, vmid,
					  page_table_base,
					  mid_mask);
}

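/* Program the VMID0 (GART) aperture. With a pdb0 BO the aperture covers
 * the FB range, otherwise the GART range; bounds are written as 4 KB
 * page numbers split across the LO32/HI32 register pairs.
 */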
static void mmhub_v4_2_0_mid_init_gart_aperture_regs(struct amdgpu_device *adev,
						     uint32_t mid_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v4_2_0_mid_setup_vm_pt_regs(adev, 0, pt_base, mid_mask);

	for_each_inst(i, mid_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.fb_end >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.fb_end >> 44));
		} else {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

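/* Program the AGP and system apertures; the AGP BAR is in 16 MB units
 * (shift by 24), the system aperture in 256 KB units (shift by 18) and
 * sized to cover both the FB and AGP ranges.
 */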
static void mmhub_v4_2_0_mid_init_system_aperture_regs(struct amdgpu_device *adev,
						       uint32_t mid_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	/*
	 * The new L1 policy blocks the SRIOV guest from writing these
	 * registers; they are programmed on the host side instead, so
	 * skip programming them here.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BASE_LO32, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BASE_HI32, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BOT_LO32,
			     lower_32_bits(adev->gmc.agp_start >> 24));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BOT_HI32,
			     upper_32_bits(adev->gmc.agp_start >> 24));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_TOP_LO32,
			     lower_32_bits(adev->gmc.agp_end >> 24));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_TOP_HI32,
			     upper_32_bits(adev->gmc.agp_end >> 24));

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_LO32,
			     lower_32_bits(min(adev->gmc.fb_start,
					       adev->gmc.agp_start) >> 18));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_HI32,
			     upper_32_bits(min(adev->gmc.fb_start,
					       adev->gmc.agp_start) >> 18));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_LO32,
			     lower_32_bits(max(adev->gmc.fb_end,
					       adev->gmc.agp_end) >> 18));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_HI32,
			     upper_32_bits(max(adev->gmc.fb_end,
					       adev->gmc.agp_end) >> 18));

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
				    ENABLE_RETRY_FAULT_INTERRUPT, 0x1);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);

		/* When squeezing VRAM into the GART aperture, the FB and
		 * AGP apertures are unused; disable them. This must be done
		 * per instance, so keep it inside the loop rather than
		 * using the loop variable after the loop has finished.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_TOP_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_TOP_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_BASE_LO32, 0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_BASE_HI32, 1);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_LO32, 0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_HI32, 1);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_LO32,
				     0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_HI32,
				     0x7F);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_HI32, 0);
		}
	}
}

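/* Set up L1 TLB control on each instance: enable the TLB and the
 * advanced driver model, and mark system accesses uncached (MTYPE_UC).
 */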
static void mmhub_v4_2_0_mid_init_tlb_regs(struct amdgpu_device *adev,
					   uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, mid_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC); /* UC, uncached */

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

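/* Set up the VM L2 cache from the regMMVM_L2_CNTL*_DEFAULT values, pick
 * fragment sizes based on translate_further, and route PDE/PTE requests
 * physically on APUs and CPU-coherent (XGMI) configurations.
 */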
static void mmhub_v4_2_0_mid_init_cache_regs(struct amdgpu_device *adev,
					     uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
		/* XXX for emulation; refer to the closed-source code. */
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2,
				    INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2,
				    INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL2, tmp);

		tmp = regMMVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL3, tmp);

		tmp = regMMVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL4, tmp);

		tmp = regMMVM_L2_CNTL5_DEFAULT;
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5,
				    L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL5, tmp);
	}
}

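/* Enable VM context 0, used for GART/system domain mappings, with the
 * page table depth and block size taken from the GMC configuration.
 */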
static void mmhub_v4_2_0_mid_enable_system_domain(struct amdgpu_device *adev,
						  uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, mid_mask) {
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    PAGE_TABLE_DEPTH, adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_CONTEXT0_CNTL, tmp);
	}
}

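/* Disable the context1 identity aperture by programming an empty range:
 * the low address (all ones) sits above the high address (zero).
 */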
static void mmhub_v4_2_0_mid_disable_identity_aperture(struct amdgpu_device *adev,
						       uint32_t mid_mask)
{
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x00001FFF);

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
			     0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
			     0);
	}
}

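/* Configure VM contexts 1-15 (user VMIDs): enable them over the full
 * address range with fault-on-error defaults; the retry behaviour
 * follows the amdgpu_noretry module parameter.
 */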
static void mmhub_v4_2_0_mid_setup_vmid_config(struct amdgpu_device *adev,
					       uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	uint32_t tmp;
	int i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
						  regMMVM_CONTEXT1_CNTL,
						  i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    adev->vm_manager.num_level);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    adev->vm_manager.block_size - 9);
			/* Send no-retry XNACK on fault to suppress VM fault storm. */
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
					    !amdgpu_noretry);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}

		/* Record the last-written CNTL value per hub, not just for
		 * the final instance.
		 */
		hub->vm_cntx_cntl = tmp;
	}
}

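/* Point all 18 invalidation engines at the full address range; each
 * range is split across an ENGn_ADDR_RANGE LO32/HI32 pair.
 */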
static void mmhub_v4_2_0_mid_program_invalidation(struct amdgpu_device *adev,
						  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x3fff);
		}
	}
}

static int mmhub_v4_2_0_mid_gart_enable(struct amdgpu_device *adev,
					uint32_t mid_mask)
{
	/* GART Enable. */
	mmhub_v4_2_0_mid_init_gart_aperture_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_system_aperture_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_tlb_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_cache_regs(adev, mid_mask);

	mmhub_v4_2_0_mid_enable_system_domain(adev, mid_mask);
	mmhub_v4_2_0_mid_disable_identity_aperture(adev, mid_mask);
	mmhub_v4_2_0_mid_setup_vmid_config(adev, mid_mask);
	mmhub_v4_2_0_mid_program_invalidation(adev, mid_mask);

	return 0;
}

static int mmhub_v4_2_0_gart_enable(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	return mmhub_v4_2_0_mid_gart_enable(adev, mid_mask);
}

static void mmhub_v4_2_0_mid_gart_disable(struct amdgpu_device *adev,
					  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, j),
				   regMMMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j),
			     regMMMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL3, 0);
	}
}

static void mmhub_v4_2_0_gart_disable(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_gart_disable(adev, mid_mask);
}

static void
mmhub_v4_2_0_mid_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value, uint32_t mid_mask)
{
	u32 tmp;
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_L2_PROTECTION_FAULT_CNTL_LO32);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
					    CRASH_ON_NO_RETRY_FAULT, 1);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_CNTL_LO32, tmp);
	}
}

/**
 * mmhub_v4_2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void
mmhub_v4_2_0_set_fault_enable_default(struct amdgpu_device *adev,
				      bool value)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_set_fault_enable_default(adev, value, mid_mask);
}

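/* Build an MMVM_INVALIDATE_ENG0_REQ value for a legacy (flush type 0)
 * per-VMID invalidation covering L1 PTEs and all L2 PTE/PDE levels.
 */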
static uint32_t mmhub_v4_2_0_get_invalidate_req(unsigned int vmid,
						uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	/* Only use legacy inv on mmhub side */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE3, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/* TODO: the L2 protection fault status register has grown to 64 bits;
 * critical fields such as FED have moved to STATUS_HI32.
 */
static void
mmhub_v4_2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					      uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS_LO32, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS_LO32, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
		status);
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(4, 2, 0):
		mmhub_cid = mmhub_client_ids_v4_2_0[cid][rw];
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

static const struct amdgpu_vmhub_funcs mmhub_v4_2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v4_2_0_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v4_2_0_get_invalidate_req,
};

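/* Cache per-instance register offsets and register-block strides in the
 * amdgpu_vmhub structures so the generic GMC code can drive any hub.
 */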
static void mmhub_v4_2_0_mid_init(struct amdgpu_device *adev,
				  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_CNTL);
		/* TODO: add a new member to accommodate the additional fault
		 * status/cntl registers
		 */
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_L2_PROTECTION_FAULT_STATUS_LO32);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_L2_PROTECTION_FAULT_CNTL_LO32);

		hub->ctx_distance = regMMVM_CONTEXT1_CNTL - regMMVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance = regMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regMMVM_INVALIDATE_ENG1_REQ -
				    regMMVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
					 regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

		hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

		hub->vm_l2_bank_select_reserved_cid2 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_BANK_SELECT_RESERVED_CID2);

		hub->vm_contexts_disable =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i), regMMVM_CONTEXTS_DISABLE);

		hub->vmhub_funcs = &mmhub_v4_2_0_vmhub_funcs;
	}
}

static void mmhub_v4_2_0_init(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_init(adev, mid_mask);
}

static void
mmhub_v4_2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					      bool enable)
{
	uint32_t def, data;
	uint32_t def1, data1, def2, data2;

	def  = data  = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);
	def1 = data1 = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB0_CNTL_MISC2);
	def2 = data2 = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB1_CNTL_MISC2);

	if (enable) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 |= (DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG, data);

	if (def1 != data1)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB0_CNTL_MISC2, data1);

	if (def2 != data2)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB1_CNTL_MISC2, data2);
}

static void
mmhub_v4_2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG, data);
}

static int mmhub_v4_2_0_set_clockgating(struct amdgpu_device *adev,
					enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)
		mmhub_v4_2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)
		mmhub_v4_2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);

	return 0;
}

static void mmhub_v4_2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v4_2_0_funcs = {
	.init = mmhub_v4_2_0_init,
	.get_fb_location = mmhub_v4_2_0_get_fb_location,
	.get_mc_fb_offset = mmhub_v4_2_0_get_mc_fb_offset,
	.setup_vm_pt_regs = mmhub_v4_2_0_setup_vm_pt_regs,
	.gart_enable = mmhub_v4_2_0_gart_enable,
	.gart_disable = mmhub_v4_2_0_gart_disable,
	.set_fault_enable_default = mmhub_v4_2_0_set_fault_enable_default,
	.set_clockgating = mmhub_v4_2_0_set_clockgating,
	.get_clockgating = mmhub_v4_2_0_get_clockgating,
};

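/* XCP hooks: on partition suspend/resume, disable or (re)enable the
 * GART only on the hub instances named by inst_mask.
 */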
static int mmhub_v4_2_0_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	mmhub_v4_2_0_mid_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return mmhub_v4_2_0_mid_gart_enable(adev, inst_mask);

	return 0;
}

static int mmhub_v4_2_0_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		mmhub_v4_2_0_mid_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs mmhub_v4_2_0_xcp_funcs = {
	.suspend = &mmhub_v4_2_0_xcp_suspend,
	.resume = &mmhub_v4_2_0_xcp_resume
};