/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "gfxhub_v1_2.h"
#include "gfxhub_v1_1.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

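/*
 * The shift by 24 below implies that MC_VM_FB_OFFSET holds the
 * framebuffer offset in 16 MB units, i.e. address bits above bit 23.
 * As an illustration, a register value of 0x100 would correspond to a
 * byte offset of 0x100 << 24 = 4 GB.
 */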
static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24;
}

static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint64_t page_table_base,
					     uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

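/*
 * Convenience wrapper: build a mask covering every XCC instance and
 * program them all. As an illustration, with NUM_XCC(adev->gfx.xcc_mask)
 * equal to 4, GENMASK(3, 0) yields 0xF, so instances 0-3 are written.
 */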
static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask);
}

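/*
 * The aperture addresses below are programmed as 4 KB-aligned values
 * split across LO32/HI32 register pairs: addr >> 12 supplies bits
 * [43:12] of the byte address and addr >> 44 the remaining high bits.
 */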
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
						    uint32_t xcc_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both VRAM and system memory (GART).
	 */
	for_each_inst(i, xcc_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		} else {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

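/*
 * Aperture register granularities differ, which explains the mixed
 * shifts below: the AGP BASE/BOT/TOP registers take 16 MB units
 * (addr >> 24) while the system aperture LOW/HIGH registers take
 * 256 KB units (addr >> 18).
 */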
static void
gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
			/* Program the system aperture low logical page number. */
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

			if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
					       AMD_APU_IS_RENOIR |
					       AMD_APU_IS_GREEN_SARDINE))
				/*
				 * Raven2 has a HW issue that prevents it from
				 * using the VRAM that lies beyond
				 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The
				 * workaround is to raise the system aperture
				 * high address by one to get rid of the VM
				 * fault and hardware hang.
				 */
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
						 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
						 max((adev->gmc.fb_end >> 18) + 0x1,
						     adev->gmc.agp_end >> 18));
			else
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
					regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
					max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

			/* Set default page address. */
			value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
				     (u32)(value >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
				     (u32)(value >> 44));

			/* Program the "protection fault" default page address. */
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
				     (u32)(adev->dummy_page_addr >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
				     (u32)((u64)adev->dummy_page_addr >> 44));

			tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
					    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
		}

		/* When VRAM is squeezed into the GART aperture, the FB and
		 * AGP apertures are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}
	}
}

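/*
 * The exact semantics of some MC_VM_MX_L1_TLB_CNTL fields are not
 * publicly documented; the MTYPE_UC setting is flagged in the code
 * itself as an emulation leftover, and ATC_EN is understood to enable
 * the ATC (IOMMU/ATS) translation path on APUs.
 */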
static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC); /* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev,
					    uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp);
	}
}

static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp);
	}
}

static void
gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	int i;

	for_each_inst(i, xcc_mask) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

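/*
 * Depth/block-size bookkeeping for contexts 1-15: with translate_further
 * the last PDE level can translate further, so the programmed depth is
 * one less than the VM manager's level count (e.g. num_level = 3
 * programs PAGE_TABLE_DEPTH = 2); without it, 9 bits are subtracted
 * from the block size instead, mirroring the GMC v9 page table layout.
 */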
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
					      uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* Send no-retry XNACK on fault to suppress VM fault storm.
			 * On 9.4.2 and 9.4.3, XNACK can be enabled in
			 * the SQ per-process.
			 * Retry faults need to be enabled for that to work.
			 */
			tmp = REG_SET_FIELD(
				tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				!adev->gmc.noretry ||
					amdgpu_ip_version(adev, GC_HWIP, 0) ==
						IP_VERSION(9, 4, 2) ||
					amdgpu_ip_version(adev, GC_HWIP, 0) ==
						IP_VERSION(9, 4, 3) ||
					amdgpu_ip_version(adev, GC_HWIP, 0) ==
						IP_VERSION(9, 4, 4));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

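/*
 * Each VM hub exposes 18 invalidation engines. Writing 0xffffffff/0x1f
 * to the ADDR_RANGE pair configures the widest possible range, so an
 * invalidation issued through any engine covers the whole GPU virtual
 * address space.
 */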
static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x1f);
		}
	}
}

static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
				       uint32_t xcc_mask)
{
	/* GART Enable. */
	gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask);

	gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask);
	gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask);
	gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask);

	return 0;
}

static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask);
}

static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
					 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		if (!amdgpu_sriov_vf(adev)) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
			WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
			WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
		}
	}
}

static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask);
}

static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev,
						     bool value,
						     uint32_t xcc_mask)
{
	u32 tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_RETRY_FAULT, 1);
		}
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

/**
 * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page; false leaves
 *	them unhandled so the hub crashes on them (see the CRASH_ON_*
 *	fields above)
 */
static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask);
}

static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL -
				regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
				regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
				regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance =
				regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
				regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_init(adev, xcc_mask);
}

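/*
 * XGMI geometry readback: PF_LFB_SIZE is in 16 MB units, hence the
 * shift by 24 when computing seg_size. As an illustrative (hypothetical)
 * example, a node with 32 GB of local VRAM would report
 * PF_LFB_SIZE = 0x800, since 32 GB / 16 MB = 2048, giving a
 * node_segment_size of 32 GB.
 */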
static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 max_num_physical_nodes;
	u32 max_physical_node_id;
	u32 xgmi_lfb_cntl;
	u32 max_region;
	u64 seg_size;

	xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL);
	seg_size = REG_GET_FIELD(
		RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE),
		MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	max_num_physical_nodes = 8;
	max_physical_node_id = 7;

	/* PF_MAX_REGION=0 means XGMI is disabled */
	if (max_region || adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.num_physical_nodes = max_region + 1;

		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
			return -EINVAL;

		adev->gmc.xgmi.physical_node_id =
			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
					PF_LFB_REGION);

		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
			return -EINVAL;

		adev->gmc.xgmi.node_segment_size = seg_size;
	}

	return 0;
}

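/*
 * FED is read here as the fatal-error-detected field of the UTCL2
 * protection fault status, i.e. whether a poisoned translation was
 * consumed. Bit 0 of VM_L2_PROTECTION_FAULT_CNTL, set via WREG32_P
 * below, is believed to be the status-clear bit, matching the inline
 * "clear page fault status and address" comment.
 */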
static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
				int xcc_id)
{
	u32 fed, status;

	status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
	if (!amdgpu_sriov_vf(adev)) {
		/* clear page fault status and address */
		WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
			 regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
	}

	return fed;
}

const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_2_gart_enable,
	.gart_disable = gfxhub_v1_2_gart_disable,
	.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
	.init = gfxhub_v1_2_init,
	.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
	.query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};

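/*
 * XCP (partition) suspend/resume hooks. On resume, VM faults are
 * redirected to the default page unless amdgpu_vm_fault_stop requests
 * AMDGPU_VM_FAULT_STOP_ALWAYS, in which case faults are left fatal.
 * Under SR-IOV the GART programming is skipped, presumably because the
 * host side owns it.
 */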
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask);

	return 0;
}

static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = {
	.suspend = &gfxhub_v1_2_xcp_suspend,
	.resume = &gfxhub_v1_2_xcp_resume
};