/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v4_2_0.h"

#include "mmhub/mmhub_4_2_0_offset.h"
#include "mmhub/mmhub_4_2_0_sh_mask.h"

#include "soc15_common.h"
#include "soc24_enum.h"

#define regMMVM_L2_CNTL3_DEFAULT				0x80100007
#define regMMVM_L2_CNTL4_DEFAULT				0x000000c1
#define regMMVM_L2_CNTL5_DEFAULT				0x00003fe0

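/*
 * UTCL2 client ID to name table, indexed as [cid][rw]: column 0 names
 * read clients, column 1 write clients. Consulted when decoding the CID
 * field of MMVM_L2_PROTECTION_FAULT_STATUS_LO32 below.
 */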
static const char *mmhub_client_ids_v4_2_0[][2] = {
	[0][0] = "VMC",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[6][0] = "MP0",
	[7][0] = "MP1",
	[8][0] = "MPIO",
	[16][0] = "HDP",
	[17][0] = "LSDMA",
	[18][0] = "JPEG",
	[19][0] = "VCNU0",
	[21][0] = "VSCH",
	[22][0] = "VCNU1",
	[23][0] = "VCN1",
	[32+20][0] = "VCN0",
	[2][1] = "DBGUNBIO",
	[3][1] = "DCEDWB",
	[4][1] = "DCEDMC",
	[5][1] = "DCEVGA",
	[6][1] = "MP0",
	[7][1] = "MP1",
	[8][1] = "MPIO",
	[10][1] = "DBGU0",
	[11][1] = "DBGU1",
	[12][1] = "DBGU2",
	[13][1] = "DBGU3",
	[14][1] = "XDP",
	[15][1] = "OSSSYS",
	[16][1] = "HDP",
	[17][1] = "LSDMA",
	[18][1] = "JPEG",
	[19][1] = "VCNU0",
	[20][1] = "VCN0",
	[21][1] = "VSCH",
	[22][1] = "VCNU1",
	[23][1] = "VCN1",
};

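/*
 * Reassemble the 64-bit framebuffer MC base from the FB_LOCATION_BASE
 * LO32/HI32 register pair: LO32 supplies the bits from bit 24 upward
 * and HI32 the topmost bit(s).
 */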
static u64 mmhub_v4_2_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base;

	base = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
			    regMMMC_VM_FB_LOCATION_BASE_LO32);
	base &= MMMC_VM_FB_LOCATION_BASE_LO32__FB_BASE_LO32_MASK;
	base <<= 24;

	base |= ((u64)(MMMC_VM_FB_LOCATION_BASE_HI32__FB_BASE_HI1_MASK &
		       RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				    regMMMC_VM_FB_LOCATION_BASE_HI32)) << 56);

	return base;
}

static u64 mmhub_v4_2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0),
				 regMMMC_VM_FB_OFFSET) << 24;
}

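/*
 * The *_mid_* helpers below program only the MMHUB instances selected
 * by mid_mask; the corresponding non-mid wrappers call them with
 * adev->aid_mask so that every instance is covered.
 */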
static void mmhub_v4_2_0_mid_setup_vm_pt_regs(struct amdgpu_device *adev,
					      uint32_t vmid,
					      uint64_t page_table_base,
					      uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, i),
				    regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, i),
				    regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void mmhub_v4_2_0_setup_vm_pt_regs(struct amdgpu_device *adev,
					  uint32_t vmid,
					  uint64_t page_table_base)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_setup_vm_pt_regs(adev, vmid,
					  page_table_base,
					  mid_mask);
}

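/*
 * Context 0 backs the GART. The start/end addresses are programmed as
 * page frame numbers split across LO32/HI32 pairs (address >> 12 and
 * address >> 44). With a PDB0 the range spans the FB aperture instead,
 * since VRAM is then reached through the GART as well.
 */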
static void mmhub_v4_2_0_mid_init_gart_aperture_regs(struct amdgpu_device *adev,
						     uint32_t mid_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v4_2_0_mid_setup_vm_pt_regs(adev, 0, pt_base, mid_mask);

	for_each_inst(i, mid_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.fb_end >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.fb_end >> 44));
		} else {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

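/*
 * System aperture and AGP setup. The AGP base/bot/top values are
 * programmed in 16MB units (address >> 24) and the system aperture
 * low/high addresses in 256KB units (address >> 18), each split across
 * a LO32/HI32 register pair.
 */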
static void mmhub_v4_2_0_mid_init_system_aperture_regs(struct amdgpu_device *adev,
						       uint32_t mid_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	/*
	 * The new L1 policy blocks the SRIOV guest from writing these
	 * registers; they are programmed on the host side instead, so
	 * skip programming them here.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BASE_LO32, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BASE_HI32, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BOT_LO32,
			     lower_32_bits(adev->gmc.agp_start >> 24));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_BOT_HI32,
			     upper_32_bits(adev->gmc.agp_start >> 24));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_TOP_LO32,
			     lower_32_bits(adev->gmc.agp_end >> 24));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_AGP_TOP_HI32,
			     upper_32_bits(adev->gmc.agp_end >> 24));

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_LO32,
			     lower_32_bits(min(adev->gmc.fb_start,
					       adev->gmc.agp_start) >> 18));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_HI32,
			     upper_32_bits(min(adev->gmc.fb_start,
					       adev->gmc.agp_start) >> 18));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_LO32,
			     lower_32_bits(max(adev->gmc.fb_end,
					       adev->gmc.agp_end) >> 18));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_HI32,
			     upper_32_bits(max(adev->gmc.fb_end,
					       adev->gmc.agp_end) >> 18));

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}

	/* In the case of squeezing VRAM into the GART aperture, we don't
	 * use the FB aperture and AGP aperture. Disable them.
	 */
	if (adev->gmc.pdb0_bo) {
		for_each_inst(i, mid_mask) {
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_TOP_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_TOP_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_BASE_LO32, 0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_FB_LOCATION_BASE_HI32, 1);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_TOP_HI32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_LO32, 0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_AGP_BOT_HI32, 1);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_LO32,
				     0xFFFFFFFF);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR_HI32,
				     0x7F);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_LO32, 0);
			WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				     regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR_HI32, 0);
		}
	}
}

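/*
 * TLB control: enable the L1 TLB and the advanced driver model, keep
 * unmapped system-aperture accesses disabled, and mark system memory
 * accesses as uncached (MTYPE_UC).
 */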
static void mmhub_v4_2_0_mid_init_tlb_regs(struct amdgpu_device *adev,
					   uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, mid_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC); /* UC, uncached */

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

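/*
 * L2 cache setup. L2_CACHE_BIGK_FRAGMENT_SIZE is a log2 value in 4K
 * pages, so the translate_further case (one extra page table level)
 * uses 2MB fragments (9) instead of 256KB ones (6).
 */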
static void mmhub_v4_2_0_mid_init_cache_regs(struct amdgpu_device *adev,
					     uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2,
				    INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2,
				    INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL2, tmp);

		tmp = regMMVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL3, tmp);

		tmp = regMMVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL4, tmp);

		tmp = regMMVM_L2_CNTL5_DEFAULT;
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5,
				    L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_CNTL5, tmp);
	}
}

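/*
 * VM context 0 is the kernel/system domain: enabled with page table
 * depth 0 and retry-on-fault disabled.
 */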
static void mmhub_v4_2_0_mid_enable_system_domain(struct amdgpu_device *adev,
						  uint32_t mid_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, mid_mask) {
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    PAGE_TABLE_DEPTH, 0);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_CONTEXT0_CNTL, tmp);
	}
}

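/*
 * Disable the context 1 identity aperture by programming an empty
 * range: the low address is set above the high address, so no request
 * can ever hit it.
 */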
static void mmhub_v4_2_0_mid_disable_identity_aperture(struct amdgpu_device *adev,
						       uint32_t mid_mask)
{
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x00001FFF);

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
			     0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
			     0);
	}
}

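/*
 * Per-VMID setup for contexts 1-15 (user VMIDs): page table depth,
 * per-type fault defaults, the page table block size, and the retry
 * behaviour selected by amdgpu_noretry. Context 0 stays with the
 * kernel.
 */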
static void mmhub_v4_2_0_mid_setup_vmid_config(struct amdgpu_device *adev,
					       uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	uint32_t tmp;
	int i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
						  regMMVM_CONTEXT1_CNTL,
						  i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    adev->vm_manager.num_level);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    adev->vm_manager.block_size - 9);
			/* Send no-retry XNACK on fault to suppress VM fault storm. */
			tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
					    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
					    !amdgpu_noretry);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j), regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}

		hub->vm_cntx_cntl = tmp;
	}
}

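/*
 * Seed all 18 invalidation engines with a full address range
 * (0xffffffff/0x3fff) so engine-based invalidations cover the whole
 * VM space by default.
 */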
static void mmhub_v4_2_0_mid_program_invalidation(struct amdgpu_device *adev,
						  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x3fff);
		}
	}
}

static int mmhub_v4_2_0_mid_gart_enable(struct amdgpu_device *adev,
					uint32_t mid_mask)
{
	/* GART Enable. */
	mmhub_v4_2_0_mid_init_gart_aperture_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_system_aperture_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_tlb_regs(adev, mid_mask);
	mmhub_v4_2_0_mid_init_cache_regs(adev, mid_mask);

	mmhub_v4_2_0_mid_enable_system_domain(adev, mid_mask);
	mmhub_v4_2_0_mid_disable_identity_aperture(adev, mid_mask);
	mmhub_v4_2_0_mid_setup_vmid_config(adev, mid_mask);
	mmhub_v4_2_0_mid_program_invalidation(adev, mid_mask);

	return 0;
}

static int mmhub_v4_2_0_gart_enable(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	return mmhub_v4_2_0_mid_gart_enable(adev, mid_mask);
}

static void mmhub_v4_2_0_mid_gart_disable(struct amdgpu_device *adev,
					  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, GET_INST(MMHUB, j),
					    regMMVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, j),
				   regMMMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j),
			     regMMMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL, tmp);
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, j), regMMVM_L2_CNTL3, 0);
	}
}

static void mmhub_v4_2_0_gart_disable(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_gart_disable(adev, mid_mask);
}

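/*
 * Flip all per-type "fault enable default" bits at once. When faults
 * are no longer redirected to the default page (value == false), also
 * crash on no-retry faults so problems surface immediately.
 */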
static void
mmhub_v4_2_0_mid_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value, uint32_t mid_mask)
{
	u32 tmp;
	int i;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	for_each_inst(i, mid_mask) {
		tmp = RREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
				   regMMVM_L2_PROTECTION_FAULT_CNTL_LO32);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL_LO32,
					    CRASH_ON_NO_RETRY_FAULT, 1);
		}
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
			     regMMVM_L2_PROTECTION_FAULT_CNTL_LO32, tmp);
	}
}

/**
 * mmhub_v4_2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void
mmhub_v4_2_0_set_fault_enable_default(struct amdgpu_device *adev,
				      bool value)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_set_fault_enable_default(adev, value, mid_mask);
}

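/*
 * Build a MMVM_INVALIDATE_ENG*_REQ value: select the VMID with a
 * per-VMID bit, force a legacy (type 0) flush on the MMHUB side
 * regardless of flush_type, and invalidate all L1/L2 PTE and PDE
 * levels.
 */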
static uint32_t mmhub_v4_2_0_get_invalidate_req(unsigned int vmid,
						uint32_t flush_type)
{
	u32 req = 0;

	/* Invalidate using legacy mode on the given vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	/* Only use legacy invalidation on the mmhub side */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/* TODO: The L2 protection fault status has grown to 64 bits;
 * critical fields such as FED have moved to STATUS_HI32.
 */
static void
mmhub_v4_2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					      uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS_LO32, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS_LO32, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
		status);
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(4, 2, 0):
		mmhub_cid = mmhub_client_ids_v4_2_0[cid][rw];
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

static const struct amdgpu_vmhub_funcs mmhub_v4_2_0_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v4_2_0_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v4_2_0_get_invalidate_req,
};

static void mmhub_v4_2_0_mid_init(struct amdgpu_device *adev,
				  uint32_t mid_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, mid_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_CONTEXT0_CNTL);
		/* TODO: add a new member to accommodate the additional fault status/cntl regs */
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_L2_PROTECTION_FAULT_STATUS_LO32);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i),
					 regMMVM_L2_PROTECTION_FAULT_CNTL_LO32);

		hub->ctx_distance = regMMVM_CONTEXT1_CNTL - regMMVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance = regMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
					 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regMMVM_INVALIDATE_ENG1_REQ -
				    regMMVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
					 regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

		hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
			MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

		hub->vm_l2_bank_select_reserved_cid2 =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i), regMMVM_L2_BANK_SELECT_RESERVED_CID2);

		hub->vm_contexts_disable =
			SOC15_REG_OFFSET(MMHUB, GET_INST(MMHUB, i), regMMVM_CONTEXTS_DISABLE);

		hub->vmhub_funcs = &mmhub_v4_2_0_vmhub_funcs;
	}
}

static void mmhub_v4_2_0_init(struct amdgpu_device *adev)
{
	uint32_t mid_mask;

	mid_mask = adev->aid_mask;
	mmhub_v4_2_0_mid_init(adev, mid_mask);
}

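/*
 * MGCG is controlled by the MM_ATC_L2_MISC_CG enable bit together with
 * the DAGB0/DAGB1 read/write return tap-chain FGCG disable bits; only
 * registers whose value actually changed are written back.
 */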
static void
mmhub_v4_2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					      bool enable)
{
	uint32_t def, data;
	uint32_t def1, data1, def2, data2;

	def  = data  = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);
	def1 = data1 = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB0_CNTL_MISC2);
	def2 = data2 = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB1_CNTL_MISC2);

	if (enable) {
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	} else {
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
		data1 |= (DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	}

	if (def != data)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG, data);

	if (def1 != data1)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB0_CNTL_MISC2, data1);

	if (def2 != data2)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regDAGB1_CNTL_MISC2, data2);
}

static void
mmhub_v4_2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG, data);
}

static int mmhub_v4_2_0_set_clockgating(struct amdgpu_device *adev,
					enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)
		mmhub_v4_2_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)
		mmhub_v4_2_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);

	return 0;
}

static void mmhub_v4_2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, GET_INST(MMHUB, 0), regMM_ATC_L2_MISC_CG);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v4_2_0_funcs = {
	.init = mmhub_v4_2_0_init,
	.get_fb_location = mmhub_v4_2_0_get_fb_location,
	.get_mc_fb_offset = mmhub_v4_2_0_get_mc_fb_offset,
	.setup_vm_pt_regs = mmhub_v4_2_0_setup_vm_pt_regs,
	.gart_enable = mmhub_v4_2_0_gart_enable,
	.gart_disable = mmhub_v4_2_0_gart_disable,
	.set_fault_enable_default = mmhub_v4_2_0_set_fault_enable_default,
	.set_clockgating = mmhub_v4_2_0_set_clockgating,
	.get_clockgating = mmhub_v4_2_0_get_clockgating,
};

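/*
 * XCP partition hooks: GART state is brought up/down per MMHUB
 * instance mask. Under SRIOV the host owns GART programming, so resume
 * only refreshes the fault handling default.
 */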
static int mmhub_v4_2_0_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	mmhub_v4_2_0_mid_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return mmhub_v4_2_0_mid_gart_enable(adev, inst_mask);

	return 0;
}

static int mmhub_v4_2_0_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		mmhub_v4_2_0_mid_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs mmhub_v4_2_0_xcp_funcs = {
	.suspend = &mmhub_v4_2_0_xcp_suspend,
	.resume = &mmhub_v4_2_0_xcp_resume
};
914