/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v4_2_0.h"

#include "mmhub/mmhub_4_2_0_offset.h"
#include "mmhub/mmhub_4_2_0_sh_mask.h"

#include "soc15_common.h"
#include "soc24_enum.h"

#define regMMVM_L2_CNTL3_DEFAULT				0x80100007
#define regMMVM_L2_CNTL4_DEFAULT				0x000000c1
#define regMMVM_L2_CNTL5_DEFAULT				0x00003fe0

static const char *mmhub_client_ids_v4_2_0[][2] = {
	[0][0] = "VMC",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[6][0] = "MP0",
	[7][0] = "MP1",
	[8][0] = "MPIO",
	[16][0] = "HDP",
	[17][0] = "LSDMA",
	[18][0] = "JPEG",
	[19][0] = "VCNU0",
	[21][0] = "VSCH",
	[22][0] = "VCNU1",
	[23][0] = "VCN1",
	[32+20][0] = "VCN0",
	[2][1] = "DBGUNBIO",
	[3][1] = "DCEDWB",
	[4][1] = "DCEDMC",
	[5][1] = "DCEVGA",
	[6][1] = "MP0",
	[7][1] = "MP1",
	[8][1] = "MPIO",
	[10][1] = "DBGU0",
	[11][1] = "DBGU1",
	[12][1] = "DBGU2",
	[13][1] = "DBGU3",
	[14][1] = "XDP",
	[15][1] = "OSSSYS",
	[16][1] = "HDP",
	[17][1] = "LSDMA",
	[18][1] = "JPEG",
	[19][1] = "VCNU0",
	[20][1] = "VCN0",
	[21][1] = "VSCH",
	[22][1] = "VCNU1",
	[23][1] = "VCN1",
};

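/*
 * Read back the XGMI LFB registers and fill in the number of physical
 * nodes, this node's id and the per-node segment size. Only meaningful
 * for CPU-connected (A + A) configurations; returns 0 otherwise.
 */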
static int mmhub_v4_2_0_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 max_num_physical_nodes;
	u32 max_physical_node_id;
	u32 xgmi_lfb_cntl;
	u32 max_region;
	u64 seg_size;

	/* limit this callback to A + A configuration only */
	if (!adev->gmc.xgmi.connected_to_cpu)
		return 0;

	xgmi_lfb_cntl = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				     regMMMC_VM_XGMI_LFB_CNTL);
	seg_size = REG_GET_FIELD(
		RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMMMC_VM_XGMI_LFB_SIZE),
		MMMC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, MMMC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	max_num_physical_nodes   = 4;
	max_physical_node_id     = 3;

	adev->gmc.xgmi.num_physical_nodes = max_region + 1;

	if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
		return -EINVAL;

	adev->gmc.xgmi.physical_node_id =
		REG_GET_FIELD(xgmi_lfb_cntl, MMMC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);

	if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
		return -EINVAL;

	adev->gmc.xgmi.node_segment_size = seg_size;

	return 0;
}

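/*
 * Assemble the 64-bit framebuffer base address from the
 * MC_VM_FB_LOCATION_BASE LO32/HI32 register pair.
 */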
static u64 mmhub_v4_2_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base;

	base = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
			    regMMMC_VM_FB_LOCATION_BASE_LO32);
	base &= MMMC_VM_FB_LOCATION_BASE_LO32__FB_BASE_LO32_MASK;
	base <<= 24;

	base |= ((u64)(MMMC_VM_FB_LOCATION_BASE_HI32__FB_BASE_HI1_MASK &
		       RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				    regMMMC_VM_FB_LOCATION_BASE_HI32)) << 56);

	return base;
}

static u64 mmhub_v4_2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				 regMMMC_VM_FB_OFFSET) << 24;
}

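/*
 * Program the CONTEXT page table base address registers for @vmid on
 * every MMHUB instance selected by @mid_mask.
 */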
static void mmhub_v4_2_0_mid_setup_vm_pt_regs(struct amdgpu_device *adev,
					      uint32_t vmid,
					      uint64_t page_table_base,
					      uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, i),
				    regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, i),
				    regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void mmhub_v4_2_0_setup_vm_pt_regs(struct amdgpu_device *adev,
					  uint32_t vmid,
					  uint64_t page_table_base)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_setup_vm_pt_regs(adev, vmid,
					  page_table_base,
					  mid_mask);
}

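/*
 * Set up the VMID0 (GART) aperture: point context0 at the PDB0/GART page
 * table and program the aperture start/end addresses on each selected
 * MMHUB instance.
 */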
static void mmhub_v4_2_0_mid_init_gart_aperture_regs(struct amdgpu_device *adev,
						     uint32_t mid_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v4_2_0_mid_setup_vm_pt_regs(adev, 0, pt_base, mid_mask);

	for_each_inst(i, mid_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		} else {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

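/*
 * Program the AGP and system apertures, the default page address and the
 * protection fault default address for each selected MMHUB instance.
 * With a VMID0 page table the AGP and system apertures are disabled.
 */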
static void mmhub_v4_2_0_mid_init_system_aperture_regs(struct amdgpu_device *adev,
						       uint32_t mid_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	/*
	 * The new L1 policy blocks the SRIOV guest from writing these
	 * registers; they are programmed by the host instead, so skip
	 * programming them here.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		if (adev->gmc.pdb0_bo) {
			/* Disable agp and system aperture
			 * when vmid0 page table is enabled */
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_TOP_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_TOP_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_BASE_LO32,
				     0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_BASE_HI32, 1);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_LO32,
				     0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_HI32, 1);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_LO32,
				     0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_HI32,
				     0x7F);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_HI32, 0);
		} else {
			/* Program the AGP BAR */
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BASE_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BASE_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_LO32,
				     lower_32_bits(adev->gmc.agp_start >> 24));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_HI32,
				     upper_32_bits(adev->gmc.agp_start >> 24));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_LO32,
				     lower_32_bits(adev->gmc.agp_end >> 24));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_HI32,
				     upper_32_bits(adev->gmc.agp_end >> 24));

			/* Program the system aperture low logical page number. */
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_LO32,
				     lower_32_bits(min(adev->gmc.fb_start,
						   adev->gmc.agp_start) >> 18));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_HI32,
				     upper_32_bits(min(adev->gmc.fb_start,
						   adev->gmc.agp_start) >> 18));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_LO32,
				     lower_32_bits(max(adev->gmc.fb_end,
						   adev->gmc.agp_end) >> 18));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_HI32,
				     upper_32_bits(max(adev->gmc.fb_end,
						   adev->gmc.agp_end) >> 18));
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
				    ENABLE_RETRY_FAULT_INTERRUPT, 0x1);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}

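/*
 * Configure the L1 TLB on each selected MMHUB instance: enable it, select
 * the system access mode and mark system aperture accesses as uncached
 * (MTYPE_UC).
 */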
static void mmhub_v4_2_0_mid_init_tlb_regs(struct amdgpu_device *adev,
					   uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, mid_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC); /* UC, uncached */

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

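/*
 * Set up the VM L2 cache control registers (MMVM_L2_CNTL..CNTL5),
 * including the fragment sizes that depend on translate_further, for each
 * selected MMHUB instance. Skipped under SRIOV; the PF programs these.
 */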
static void mmhub_v4_2_0_mid_init_cache_regs(struct amdgpu_device *adev,
					     uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2,
				    INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2,
				    INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL2, tmp);

		tmp = regMMVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL3, tmp);

		tmp = regMMVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL4, tmp);

		tmp = regMMVM_L2_CNTL5_DEFAULT;
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5,
				    L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL5, tmp);
	}
}

static void mmhub_v4_2_0_mid_enable_system_domain(struct amdgpu_device *adev,
						  uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, mid_mask) {
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    PAGE_TABLE_DEPTH, adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_CONTEXT0_CNTL, tmp);
	}
}

static void mmhub_v4_2_0_mid_disable_identity_aperture(struct amdgpu_device *adev,
						       uint32_t mid_mask)
{
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x00001FFF);

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
			     0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
			     0);
	}
}

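/*
 * Configure VM contexts 1..15 on each selected MMHUB instance: page table
 * depth and block size, the protection fault defaults and the page table
 * address range.
 */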
static void mmhub_v4_2_0_mid_setup_vmid_config(struct amdgpu_device *adev,
					       uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	uint32_t tmp;
	int i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
						  regMMVM_CONTEXT1_CNTL,
						  i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    adev->vm_manager.num_level);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    adev->vm_manager.block_size - 9);
			/* Send no-retry XNACK on fault to suppress VM fault storm. */
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
					    !amdgpu_noretry);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}

		hub->vm_cntx_cntl = tmp;
	}
}

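/*
 * Initialize the per-engine invalidation address ranges (engines 0..17)
 * to cover the whole address space on each selected MMHUB instance.
 */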
static void mmhub_v4_2_0_mid_program_invalidation(struct amdgpu_device *adev,
						  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x3fff);
		}
	}
}

static int mmhub_v4_2_0_mid_gart_enable(struct amdgpu_device *adev,
					uint32_t mid_mask)
{
	/* GART Enable. */
	mmhub_v4_2_0_mid_init_gart_aperture_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_system_aperture_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_tlb_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_cache_regs(adev, mid_mask);

	mmhub_v4_2_0_mid_enable_system_domain(adev, mid_mask);
	mmhub_v4_2_0_mid_disable_identity_aperture(adev, mid_mask);
	mmhub_v4_2_0_mid_setup_vmid_config(adev, mid_mask);
	mmhub_v4_2_0_mid_program_invalidation(adev, mid_mask);

	return 0;
}

static int mmhub_v4_2_0_gart_enable(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	return mmhub_v4_2_0_mid_gart_enable(adev, mid_mask);
}

static void mmhub_v4_2_0_mid_gart_disable(struct amdgpu_device *adev,
					  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, j),
				   regMMMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j),
			     regMMMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL3, 0);
	}
}

static void mmhub_v4_2_0_gart_disable(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_gart_disable(adev, mid_mask);
}

624 
625 static void
626 mmhub_v4_2_0_mid_set_fault_enable_default(struct amdgpu_device *adev,
627 					  bool value, uint32_t mid_mask)
628 {
629 	u32 tmp;
630 	int i;
631 
632 	/* These registers are not accessible to VF-SRIOV.
633 	 * The PF will program them instead.
634 	 */
635 	if (amdgpu_sriov_vf(adev))
636 		return;
637 
638 	for_each_inst(i, mid_mask) {
639 		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
640 				   regMMVM_L2_PROTECTION_FAULT_CNTL_LO32);
641 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
642 				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
643 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
644 				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
645 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
646 				    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
647 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
648 				    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
649 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
650 				    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
651 				    value);
652 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
653 				    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
654 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
655 				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
656 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
657 				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
658 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
659 				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
660 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
661 				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
662 		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
663 				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
664 		if (!value) {
665 			tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
666 					    CRASH_ON_NO_RETRY_FAULT, 1);
667 		}
668 		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
669 			     regMMVM_L2_PROTECTION_FAULT_CNTL_LO32, tmp);
670 	}
671 }
672 
673 
674 /**
675  * mmhub_v4_2_0_set_fault_enable_default - update GART/VM fault handling
676  *
677  * @adev: amdgpu_device pointer
678  * @value: true redirects VM faults to the default page
679  */
680 static void
681 mmhub_v4_2_0_set_fault_enable_default(struct amdgpu_device *adev,
682 				      bool value)
683 {
684 	uint32_t mid_mask;
685 
686 	mid_mask = adev->aid_mask;
687 	mmhub_v4_2_0_mid_set_fault_enable_default(adev, value, mid_mask);
688 }
689 
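/*
 * Build the MMVM_INVALIDATE_ENG*_REQ value for a legacy (flush type 0)
 * invalidation of all page table levels for @vmid.
 */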
static uint32_t mmhub_v4_2_0_get_invalidate_req(unsigned int vmid,
						uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	/* Only use legacy inv on mmhub side */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE3, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/* TODO: the L2 protection fault status register has grown to 64 bits;
 * some critical fields such as FED have moved to STATUS_HI32.
 */
static void
mmhub_v4_2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					      uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS_LO32, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS_LO32, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
		status);
	mmhub_cid = amdgpu_mmhub_client_name(&adev->mmhub, cid, rw);
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

static const struct amdgpu_vmhub_funcs mmhub_v4_2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v4_2_0_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v4_2_0_get_invalidate_req,
};

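/*
 * Fill in the amdgpu_vmhub structure (register offsets, distances between
 * per-context and per-engine registers, fault interrupt masks and callback
 * table) for each selected MMHUB instance.
 */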
static void mmhub_v4_2_0_mid_init(struct amdgpu_device *adev,
				  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_CNTL);
		/* TODO: add a new member to accommodate the additional fault status/cntl regs */
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_L2_PROTECTION_FAULT_STATUS_LO32);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_L2_PROTECTION_FAULT_CNTL_LO32);

		hub->ctx_distance = regMMVM_CONTEXT1_CNTL - regMMVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance = regMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regMMVM_INVALIDATE_ENG1_REQ -
				    regMMVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
					 regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

		hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

		hub->vm_l2_bank_select_reserved_cid2 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_BANK_SELECT_RESERVED_CID2);

		hub->vm_contexts_disable =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i), regMMVM_CONTEXTS_DISABLE);

		hub->vmhub_funcs = &mmhub_v4_2_0_vmhub_funcs;
	}
}

static void mmhub_v4_2_0_init(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_init(adev, mid_mask);

	amdgpu_mmhub_init_client_info(&adev->mmhub,
				      mmhub_client_ids_v4_2_0,
				      ARRAY_SIZE(mmhub_client_ids_v4_2_0));
}

static void
mmhub_v4_2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					      bool enable)
{
	uint32_t def, data;
	uint32_t def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);
	def1 = data1 = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB0_CNTL_MISC2);
	def2 = data2 = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB1_CNTL_MISC2);

	if (enable) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 |= (DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG, data);
	if (def1 != data1)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB0_CNTL_MISC2, data1);

	if (def2 != data2)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB1_CNTL_MISC2, data2);
}

static void
mmhub_v4_2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG, data);
}

static int mmhub_v4_2_0_set_clockgating(struct amdgpu_device *adev,
					enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)
		mmhub_v4_2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)
		mmhub_v4_2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);

	return 0;
}

static void mmhub_v4_2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v4_2_0_funcs = {
	.init = mmhub_v4_2_0_init,
	.get_fb_location = mmhub_v4_2_0_get_fb_location,
	.get_mc_fb_offset = mmhub_v4_2_0_get_mc_fb_offset,
	.setup_vm_pt_regs = mmhub_v4_2_0_setup_vm_pt_regs,
	.gart_enable = mmhub_v4_2_0_gart_enable,
	.gart_disable = mmhub_v4_2_0_gart_disable,
	.set_fault_enable_default = mmhub_v4_2_0_set_fault_enable_default,
	.set_clockgating = mmhub_v4_2_0_set_clockgating,
	.get_clockgating = mmhub_v4_2_0_get_clockgating,
	.get_xgmi_info = mmhub_v4_2_0_get_xgmi_info,
};

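/*
 * XCP resume: restore the fault handling default and, on bare metal,
 * re-enable GART for the MMHUB instances in @inst_mask.
 */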
static int mmhub_v4_2_0_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	mmhub_v4_2_0_mid_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return mmhub_v4_2_0_mid_gart_enable(adev, inst_mask);

	return 0;
}

static int mmhub_v4_2_0_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		mmhub_v4_2_0_mid_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs mmhub_v4_2_0_xcp_funcs = {
	.suspend = &mmhub_v4_2_0_xcp_suspend,
	.resume = &mmhub_v4_2_0_xcp_resume
};
958