xref: /linux/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "gfxhub_v1_2.h"
#include "gfxhub_v1_1.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

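/* Return the MC framebuffer offset in bytes; MC_VM_FB_OFFSET holds it in units of 16 MB. */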
static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24;
}

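/* Program the page table base address of @vmid on every GFX hub instance selected by @xcc_mask. */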
static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint64_t page_table_base,
					     uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

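/* Callback wrapper that programs the VMID page table base on all XCC instances. */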
static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask);
}

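/* Program the VMID0 (GART) page table base and the address range it translates. */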
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
						    uint32_t xcc_mask)
{
	uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
			adev->gmc.vram_start : adev->gmc.fb_start;
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask);

	/* If the GART is used for FB translation, the VMID0 page table covers
	 * both VRAM and system memory (GART).
	 */
	for_each_inst(i, xcc_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(gart_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(gart_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		} else {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

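/* Program the AGP aperture, the system aperture, the default page address and
 * the protection fault default address.
 */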
static void
gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
			/* Program the system aperture low logical page number. */
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

			if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
					       AMD_APU_IS_RENOIR |
					       AMD_APU_IS_GREEN_SARDINE))
			       /*
				* Raven2 has a HW issue that makes it unable to use
				* VRAM above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
				* workaround, raise the system aperture high address
				* by one to avoid the VM fault and hardware hang.
				*/
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
						 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
						 max((adev->gmc.fb_end >> 18) + 0x1,
						     adev->gmc.agp_end >> 18));
			else
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
					regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
					max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

			/* Set default page address. */
			value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
				     (u32)(value >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
				     (u32)(value >> 44));

			/* Program "protection fault". */
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
				     (u32)(adev->dummy_page_addr >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
				     (u32)((u64)adev->dummy_page_addr >> 44));

			tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
					    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
		}

		/* When VRAM is squeezed into the GART aperture, the FB and AGP
		 * apertures are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) {
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}
	}
}

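/* Program MC_VM_MX_L1_TLB_CNTL: enable the L1 TLB, set the system access mode
 * and MTYPE, and enable ATC.
 */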
static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

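/* Program the VM L2 cache controls (VM_L2_CNTL through VM_L2_CNTL4); not called
 * under SR-IOV.
 */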
static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev,
					    uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp);
	}
}

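/* Enable VM context 0, the system domain backed by the VMID0 (GART) page table. */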
static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp);
	}
}

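/* Disable the context1 identity aperture by programming an empty (low > high) range. */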
static void
gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	int i;

	for_each_inst(i, xcc_mask) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

static inline bool
gfxhub_v1_2_per_process_xnack_support(struct amdgpu_device *adev)
{
	/*
	 * TODO: Check if this function is really needed, so far only 9.4.3
	 * variants use GFXHUB 1.2
	 */
	return !!adev->aid_mask;
}

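/* Configure VM contexts 1-15 (user VMIDs): page table depth, block size,
 * fault handling and the translated address range.
 */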
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
					      uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* Send no-retry XNACK on fault to suppress VM fault storm.
			 * On 9.4.3 variants, XNACK can be enabled in
			 * the SQ per-process.
			 * Retry faults need to be enabled for that to work.
			 */
			tmp = REG_SET_FIELD(
				tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				!adev->gmc.noretry ||
					gfxhub_v1_2_per_process_xnack_support(
						adev));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

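/* Program all 18 VM invalidation engines with a full address range. */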
static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];

		for (i = 0 ; i < 18; ++i) {
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x1f);
		}
	}
}

static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
				       uint32_t xcc_mask)
{
	/* GART Enable. */
	gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask);

	gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask);
	gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask);
	gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask);

	return 0;
}

static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask);
}

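/* Disable all VM contexts, the L1 TLB and, on bare metal, the VM L2 cache. */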
static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
					 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp,
					MC_VM_MX_L1_TLB_CNTL,
					ENABLE_ADVANCED_DRIVER_MODEL,
					0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		if (!amdgpu_sriov_vf(adev)) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
			WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
			WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
		}
	}
}

static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask);
}

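/* Enable or disable redirecting VM faults to the default page on each selected
 * hub; when disabled, also set the CRASH_ON_*_FAULT bits.
 */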
static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev,
						     bool value,
						     uint32_t xcc_mask)
{
	u32 tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp,
				VM_L2_PROTECTION_FAULT_CNTL,
				TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_RETRY_FAULT, 1);
		}
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

/**
 * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask);
}

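/* Record per-hub register offsets and register strides in the amdgpu_vmhub structure. */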
static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL -
				regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
				regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
				regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance =
				regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
				regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_init(adev, xcc_mask);
}

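/* Derive the XGMI topology (node count, physical node id, segment size) from the
 * MC_VM_XGMI_LFB_CNTL and MC_VM_XGMI_LFB_SIZE registers.
 */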
static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 max_num_physical_nodes;
	u32 max_physical_node_id;
	u32 xgmi_lfb_cntl;
	u32 max_region;
	u64 seg_size;

	xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL);
	seg_size = REG_GET_FIELD(
		RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE),
		MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	max_num_physical_nodes   = 8;
	max_physical_node_id     = 7;

	/* PF_MAX_REGION=0 means xgmi is disabled */
	if (max_region || adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.num_physical_nodes = max_region + 1;

		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
			return -EINVAL;

		adev->gmc.xgmi.physical_node_id =
			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
					PF_LFB_REGION);

		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
			return -EINVAL;

		adev->gmc.xgmi.node_segment_size = seg_size;
	}

	return 0;
}

const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_2_gart_enable,
	.gart_disable = gfxhub_v1_2_gart_disable,
	.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
	.init = gfxhub_v1_2_init,
	.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
};

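/* XCP callback: on resume, restore default VM fault handling and, on bare metal,
 * re-enable the GART for the hub instances in @inst_mask.
 */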
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask);

	return 0;
}

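/* XCP callback: on suspend (bare metal only), disable the GART for the hub
 * instances in @inst_mask.
 */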
static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = {
	.suspend = &gfxhub_v1_2_xcp_suspend,
	.resume = &gfxhub_v1_2_xcp_resume
};